From 05e65d94150a39fbca204d97f8e56c8c9487b3fe Mon Sep 17 00:00:00 2001
From: Reto Lehmann
Date: Tue, 19 Mar 2024 11:43:40 +0100
Subject: [PATCH 01/16] Bump versions and reorder deps

---
 hack/update-deps.sh | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/hack/update-deps.sh b/hack/update-deps.sh
index 356b9cebec..e483d2b458 100755
--- a/hack/update-deps.sh
+++ b/hack/update-deps.sh
@@ -15,10 +15,10 @@ set -o pipefail
 cd "${ROOT_DIR}"
 
 # This controls the knative release version we track.
-KN_VERSION="release-1.11"
-EVENTING_VERSION="release-v1.11"
-EVENTING_KAFKA_BROKER_VERSION="release-v1.11"
-SERVING_VERSION="release-v1.11"
+KN_VERSION="release-1.13"
+EVENTING_VERSION="release-v1.12"
+EVENTING_KAFKA_BROKER_VERSION="release-v1.12"
+SERVING_VERSION="release-v1.12"
 
 GO_VERSION="$(metadata.get requirements.golang)"
 OCP_VERSION="$(metadata.get requirements.ocpVersion.min)"
@@ -29,13 +29,13 @@ FLOATING_DEPS=(
 )
 
 REPLACE_DEPS=(
-  "knative.dev/eventing-kafka-broker=github.com/openshift-knative/eventing-kafka-broker@${EVENTING_KAFKA_BROKER_VERSION}"
-  "knative.dev/eventing=github.com/openshift-knative/eventing@${EVENTING_VERSION}"
-  "knative.dev/serving=github.com/openshift-knative/serving@${SERVING_VERSION}"
   "knative.dev/pkg=knative.dev/pkg@${KN_VERSION}"
   "knative.dev/hack=knative.dev/hack@${KN_VERSION}"
   "knative.dev/networking=knative.dev/networking@${KN_VERSION}"
   "knative.dev/reconciler-test=knative.dev/reconciler-test@${KN_VERSION}"
+  "knative.dev/eventing-kafka-broker=github.com/openshift-knative/eventing-kafka-broker@${EVENTING_KAFKA_BROKER_VERSION}"
+  "knative.dev/eventing=github.com/openshift-knative/eventing@${EVENTING_VERSION}"
+  "knative.dev/serving=github.com/openshift-knative/serving@${SERVING_VERSION}"
   "github.com/openshift/api=github.com/openshift/api@release-${OCP_VERSION}"
   "github.com/openshift/client-go=github.com/openshift/client-go@release-${OCP_VERSION}"
   "github.com/openshift/machine-config-operator=github.com/openshift/machine-config-operator@release-${OCP_VERSION}"

From a01fb9132d4524ed61ebb997c8ae0f0a96f1f239 Mon Sep 17 00:00:00 2001
From: Reto Lehmann
Date: Mon, 25 Mar 2024 12:56:48 +0100
Subject: [PATCH 02/16] Drop replaces for K8s deps

---
 go.mod | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/go.mod b/go.mod
index dff6bea360..0b815b1730 100644
--- a/go.mod
+++ b/go.mod
@@ -199,14 +199,3 @@ replace (
 	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf
 	github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20231113175050-15d0b0288a43
 )
-
-replace (
-	// Adjustments to align transitive deps
-	k8s.io/api => k8s.io/api v0.25.4
-	k8s.io/apimachinery => k8s.io/apimachinery v0.26.4
-	k8s.io/client-go => k8s.io/client-go v0.25.4
-	k8s.io/code-generator => k8s.io/code-generator v0.25.4
-	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1
-	k8s.io/utils => k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
-	sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.12.3
-)

From 00c27d8e5f27b6d70d249bdbc0041f7f45bfa291 Mon Sep 17 00:00:00 2001
From: Reto Lehmann
Date: Mon, 25 Mar 2024 13:04:41 +0100
Subject: [PATCH 03/16] Run `hack/update-deps.sh --upgrade`

---
 go.mod | 60 +-
 go.sum | 767 +-
 .../{Shopify => IBM}/sarama/.gitignore | 0
 .../{Shopify => IBM}/sarama/.golangci.yml | 31 +-
 .../IBM/sarama/.pre-commit-config.yaml | 41 +
 vendor/github.com/IBM/sarama/CHANGELOG.md | 1719 +++
.../github.com/IBM/sarama/CODE_OF_CONDUCT.md | 128 + vendor/github.com/IBM/sarama/CONTRIBUTING.md | 77 + vendor/github.com/IBM/sarama/Dockerfile.kafka | 47 + .../sarama/LICENSE => IBM/sarama/LICENSE.md} | 4 + .../{Shopify => IBM}/sarama/Makefile | 0 .../{Shopify => IBM}/sarama/README.md | 18 +- vendor/github.com/IBM/sarama/SECURITY.md | 11 + .../{Shopify => IBM}/sarama/Vagrantfile | 0 .../{Shopify => IBM}/sarama/acl_bindings.go | 0 .../sarama/acl_create_request.go | 4 + .../sarama/acl_create_response.go | 18 +- .../sarama/acl_delete_request.go | 4 + .../sarama/acl_delete_response.go | 15 +- .../sarama/acl_describe_request.go | 6 +- .../sarama/acl_describe_response.go | 8 + .../{Shopify => IBM}/sarama/acl_filter.go | 0 .../{Shopify => IBM}/sarama/acl_types.go | 8 +- .../sarama/add_offsets_to_txn_request.go | 18 +- .../sarama/add_offsets_to_txn_response.go | 22 +- .../sarama/add_partitions_to_txn_request.go | 18 +- .../sarama/add_partitions_to_txn_response.go | 21 +- .../{Shopify => IBM}/sarama/admin.go | 208 +- .../sarama/alter_client_quotas_request.go | 7 +- .../sarama/alter_client_quotas_response.go | 11 +- .../sarama/alter_configs_request.go | 16 +- .../sarama/alter_configs_response.go | 40 +- .../alter_partition_reassignments_request.go | 4 + .../alter_partition_reassignments_response.go | 10 + .../alter_user_scram_credentials_request.go | 4 + .../alter_user_scram_credentials_response.go | 8 + .../sarama/api_versions_request.go | 14 +- .../sarama/api_versions_response.go | 20 +- .../{Shopify => IBM}/sarama/async_producer.go | 44 +- .../sarama/balance_strategy.go | 114 +- .../{Shopify => IBM}/sarama/broker.go | 202 +- .../{Shopify => IBM}/sarama/client.go | 250 +- .../{Shopify => IBM}/sarama/compress.go | 2 +- .../{Shopify => IBM}/sarama/config.go | 50 +- .../sarama/config_resource_type.go | 0 .../{Shopify => IBM}/sarama/consumer.go | 61 +- .../{Shopify => IBM}/sarama/consumer_group.go | 200 +- .../sarama/consumer_group_members.go | 60 +- .../sarama/consumer_metadata_request.go | 17 +- .../sarama/consumer_metadata_response.go | 18 +- .../{Shopify => IBM}/sarama/control_record.go | 0 .../{Shopify => IBM}/sarama/crc32_field.go | 0 .../sarama/create_partitions_request.go | 16 +- .../sarama/create_partitions_response.go | 20 +- .../sarama/create_topics_request.go | 35 +- .../sarama/create_topics_response.go | 26 +- vendor/github.com/IBM/sarama/decompress.go | 98 + .../sarama/delete_groups_request.go | 18 +- .../sarama/delete_groups_response.go | 20 +- .../sarama/delete_offsets_request.go | 7 +- .../sarama/delete_offsets_response.go | 11 +- .../sarama/delete_records_request.go | 14 +- .../sarama/delete_records_response.go | 17 +- .../sarama/delete_topics_request.go | 12 +- .../sarama/delete_topics_response.go | 16 +- .../sarama/describe_client_quotas_request.go | 7 +- .../sarama/describe_client_quotas_response.go | 11 +- .../sarama/describe_configs_request.go | 12 +- .../sarama/describe_configs_response.go | 29 +- .../sarama/describe_groups_request.go | 17 +- .../sarama/describe_groups_response.go | 23 +- .../sarama/describe_log_dirs_request.go | 7 + .../sarama/describe_log_dirs_response.go | 11 + ...describe_user_scram_credentials_request.go | 4 + ...escribe_user_scram_credentials_response.go | 8 + .../{Shopify => IBM}/sarama/dev.yml | 0 .../sarama/docker-compose.yml | 148 +- .../sarama/encoder_decoder.go | 0 .../sarama/end_txn_request.go | 16 +- .../sarama/end_txn_response.go | 20 +- vendor/github.com/IBM/sarama/entrypoint.sh | 31 + .../{Shopify => IBM}/sarama/errors.go | 190 +- 
.../{Shopify => IBM}/sarama/fetch_request.go | 51 +- .../{Shopify => IBM}/sarama/fetch_response.go | 51 +- .../sarama/find_coordinator_request.go | 6 + .../sarama/find_coordinator_response.go | 10 + .../sarama/gssapi_kerberos.go | 2 + .../sarama/heartbeat_request.go | 17 +- .../sarama/heartbeat_response.go | 21 +- .../incremental_alter_configs_request.go | 7 +- .../incremental_alter_configs_response.go | 11 +- .../sarama/init_producer_id_request.go | 20 +- .../sarama/init_producer_id_response.go | 18 +- .../{Shopify => IBM}/sarama/interceptors.go | 0 .../sarama/join_group_request.go | 52 +- .../sarama/join_group_response.go | 51 +- .../sarama/kerberos_client.go | 15 +- .../sarama/leave_group_request.go | 17 +- .../sarama/leave_group_response.go | 23 +- .../{Shopify => IBM}/sarama/length_field.go | 0 .../IBM/sarama/list_groups_request.go | 82 + .../IBM/sarama/list_groups_response.go | 173 + .../list_partition_reassignments_request.go | 4 + .../list_partition_reassignments_response.go | 10 + .../{Shopify => IBM}/sarama/message.go | 0 .../{Shopify => IBM}/sarama/message_set.go | 0 .../github.com/IBM/sarama/metadata_request.go | 240 + .../IBM/sarama/metadata_response.go | 537 + .../{Shopify => IBM}/sarama/metrics.go | 2 +- .../{Shopify => IBM}/sarama/mockbroker.go | 72 +- .../{Shopify => IBM}/sarama/mockkerberos.go | 0 .../{Shopify => IBM}/sarama/mockresponses.go | 149 +- .../sarama/offset_commit_request.go | 32 +- .../sarama/offset_commit_response.go | 30 +- .../sarama/offset_fetch_request.go | 69 +- .../sarama/offset_fetch_response.go | 40 +- .../{Shopify => IBM}/sarama/offset_manager.go | 82 +- .../{Shopify => IBM}/sarama/offset_request.go | 53 +- .../sarama/offset_response.go | 72 +- .../{Shopify => IBM}/sarama/packet_decoder.go | 2 +- .../{Shopify => IBM}/sarama/packet_encoder.go | 0 .../{Shopify => IBM}/sarama/partitioner.go | 31 + .../{Shopify => IBM}/sarama/prep_encoder.go | 0 .../sarama/produce_request.go | 30 +- .../sarama/produce_response.go | 27 +- .../{Shopify => IBM}/sarama/produce_set.go | 9 +- .../{Shopify => IBM}/sarama/quota_types.go | 0 .../{Shopify => IBM}/sarama/real_decoder.go | 0 .../{Shopify => IBM}/sarama/real_encoder.go | 0 .../{Shopify => IBM}/sarama/record.go | 0 .../{Shopify => IBM}/sarama/record_batch.go | 8 +- .../{Shopify => IBM}/sarama/records.go | 0 .../{Shopify => IBM}/sarama/request.go | 98 +- .../sarama/response_header.go | 0 .../{Shopify => IBM}/sarama/sarama.go | 50 +- .../sarama/sasl_authenticate_request.go | 4 + .../sarama/sasl_authenticate_response.go | 4 + .../sarama/sasl_handshake_request.go | 11 +- .../sarama/sasl_handshake_response.go | 14 +- .../sarama/scram_formatter.go | 0 .../sarama/sticky_assignor_user_data.go | 0 .../sarama/sync_group_request.go | 17 +- .../sarama/sync_group_response.go | 23 +- .../{Shopify => IBM}/sarama/sync_producer.go | 2 +- .../{Shopify => IBM}/sarama/timestamp.go | 0 .../sarama/transaction_manager.go | 83 +- .../sarama/txn_offset_commit_request.go | 44 +- .../sarama/txn_offset_commit_response.go | 23 +- .../{Shopify => IBM}/sarama/utils.go | 46 +- .../{Shopify => IBM}/sarama/version.go | 0 vendor/github.com/IBM/sarama/zstd.go | 74 + vendor/github.com/Shopify/sarama/CHANGELOG.md | 1187 -- .../Shopify/sarama/Dockerfile.kafka | 27 - .../github.com/Shopify/sarama/decompress.go | 61 - .../github.com/Shopify/sarama/entrypoint.sh | 26 - .../Shopify/sarama/list_groups_request.go | 27 - .../Shopify/sarama/list_groups_response.go | 73 - .../Shopify/sarama/metadata_request.go | 85 - .../Shopify/sarama/metadata_response.go | 325 - 
vendor/github.com/Shopify/sarama/zstd.go | 50 - vendor/github.com/coreos/go-oidc/v3/LICENSE | 202 + vendor/github.com/coreos/go-oidc/v3/NOTICE | 5 + .../github.com/coreos/go-oidc/v3/oidc/jose.go | 17 + .../github.com/coreos/go-oidc/v3/oidc/jwks.go | 250 + .../github.com/coreos/go-oidc/v3/oidc/oidc.go | 554 + .../coreos/go-oidc/v3/oidc/verify.go | 356 + .../json-patch/v5/internal/json/decode.go | 1385 ++ .../json-patch/v5/internal/json/encode.go | 1473 ++ .../json-patch/v5/internal/json/fold.go | 141 + .../json-patch/v5/internal/json/fuzz.go | 42 + .../json-patch/v5/internal/json/indent.go | 143 + .../json-patch/v5/internal/json/scanner.go | 610 + .../json-patch/v5/internal/json/stream.go | 515 + .../json-patch/v5/internal/json/tables.go | 218 + .../json-patch/v5/internal/json/tags.go | 38 + .../github.com/evanphx/json-patch/v5/merge.go | 58 +- .../github.com/evanphx/json-patch/v5/patch.go | 372 +- .../github.com/go-jose/go-jose/v3/.gitignore | 2 + .../go-jose/go-jose/v3/.golangci.yml | 53 + .../github.com/go-jose/go-jose/v3/.travis.yml | 33 + .../go-jose/go-jose/v3/CHANGELOG.md | 78 + .../go-jose/go-jose/v3/CONTRIBUTING.md | 15 + vendor/github.com/go-jose/go-jose/v3/LICENSE | 202 + .../github.com/go-jose/go-jose/v3/README.md | 108 + .../github.com/go-jose/go-jose/v3/SECURITY.md | 13 + .../go-jose/go-jose/v3/asymmetric.go | 595 + .../go-jose/go-jose/v3/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose/v3/cipher/concat_kdf.go | 75 + .../go-jose/go-jose/v3/cipher/ecdh_es.go | 86 + .../go-jose/go-jose/v3/cipher/key_wrap.go | 109 + .../github.com/go-jose/go-jose/v3/crypter.go | 593 + .../go-jose/go-jose/v3}/doc.go | 17 +- .../github.com/go-jose/go-jose/v3/encoding.go | 237 + .../go-jose/go-jose/v3/json/LICENSE | 27 + .../go-jose/go-jose/v3/json/README.md | 13 + .../go-jose/go-jose/v3/json/decode.go | 1216 ++ .../go-jose/go-jose/v3/json/encode.go | 1197 ++ .../go-jose/go-jose/v3/json/indent.go | 141 + .../go-jose/go-jose/v3/json/scanner.go | 623 + .../go-jose/go-jose/v3/json/stream.go | 484 + .../go-jose/go-jose/v3/json/tags.go | 44 + vendor/github.com/go-jose/go-jose/v3/jwe.go | 295 + vendor/github.com/go-jose/go-jose/v3/jwk.go | 812 ++ vendor/github.com/go-jose/go-jose/v3/jws.go | 369 + .../go-jose/go-jose/v3/jwt/builder.go | 334 + .../go-jose/go-jose/v3/jwt/claims.go | 130 + .../go-jose/go-jose/v3/jwt}/doc.go | 12 +- .../go-jose/go-jose/v3/jwt/errors.go | 53 + .../github.com/go-jose/go-jose/v3/jwt/jwt.go | 133 + .../go-jose/go-jose/v3/jwt/validation.go | 120 + .../github.com/go-jose/go-jose/v3/opaque.go | 144 + .../github.com/go-jose/go-jose/v3/shared.go | 525 + .../github.com/go-jose/go-jose/v3/signing.go | 487 + .../go-jose/go-jose/v3/symmetric.go | 505 + vendor/github.com/gobuffalo/flect/README.md | 51 +- .../github.com/gobuffalo/flect/SHOULDERS.md | 10 +- vendor/github.com/gobuffalo/flect/camelize.go | 4 - .../github.com/gobuffalo/flect/custom_data.go | 5 + vendor/github.com/gobuffalo/flect/humanize.go | 10 +- .../github.com/gobuffalo/flect/pascalize.go | 11 +- .../gobuffalo/flect/plural_rules.go | 619 +- .../github.com/gobuffalo/flect/pluralize.go | 15 +- vendor/github.com/gobuffalo/flect/rule.go | 7 + .../gobuffalo/flect/singular_rules.go | 27 +- .../github.com/gobuffalo/flect/singularize.go | 30 +- vendor/github.com/gobuffalo/flect/titleize.go | 14 +- .../github.com/gobuffalo/flect/underscore.go | 12 +- vendor/github.com/gobuffalo/flect/version.go | 2 +- .../{gnostic => gnostic-models}/LICENSE | 0 .../compiler/README.md | 0 .../compiler/context.go | 0 .../compiler/error.go | 0 
.../compiler/extensions.go | 2 +- .../compiler/helpers.go | 2 +- .../compiler/main.go | 0 .../compiler/reader.go | 0 .../extensions/README.md | 0 .../extensions/extension.pb.go | 4 +- .../extensions/extension.proto | 0 .../extensions/extensions.go | 0 .../jsonschema/README.md | 0 .../jsonschema/base.go | 15 +- .../jsonschema/display.go | 17 +- .../jsonschema/models.go | 8 +- .../jsonschema/operations.go | 0 .../jsonschema/reader.go | 1 + .../jsonschema/schema.json | 0 .../jsonschema/writer.go | 30 +- .../openapiv2/OpenAPIv2.go | 9 +- .../openapiv2/OpenAPIv2.pb.go | 4 +- .../openapiv2/OpenAPIv2.proto | 0 .../openapiv2/README.md | 0 .../openapiv2/document.go | 2 +- .../openapiv2/openapi-2.0.json | 0 .../openapiv3/OpenAPIv3.go | 9 +- .../openapiv3/OpenAPIv3.pb.go | 13 +- .../openapiv3/OpenAPIv3.proto | 2 +- .../openapiv3/README.md | 4 - .../openapiv3/document.go | 2 +- .../gnostic/openapiv3/annotations.pb.go | 183 - .../gnostic/openapiv3/annotations.proto | 60 - .../google/gnostic/openapiv3/openapi-3.0.json | 1251 -- .../google/gnostic/openapiv3/openapi-3.1.json | 1250 -- .../gorilla/websocket/.editorconfig | 20 + .../github.com/gorilla/websocket/.gitignore | 26 +- .../gorilla/websocket/.golangci.yml | 3 + vendor/github.com/gorilla/websocket/AUTHORS | 9 - vendor/github.com/gorilla/websocket/LICENSE | 39 +- vendor/github.com/gorilla/websocket/Makefile | 34 + vendor/github.com/gorilla/websocket/README.md | 19 +- vendor/github.com/gorilla/websocket/client.go | 44 +- .../gorilla/websocket/compression.go | 9 +- vendor/github.com/gorilla/websocket/conn.go | 83 +- vendor/github.com/gorilla/websocket/mask.go | 4 + vendor/github.com/gorilla/websocket/proxy.go | 17 +- vendor/github.com/gorilla/websocket/server.go | 42 +- .../gorilla/websocket/tls_handshake.go | 3 - .../gorilla/websocket/tls_handshake_116.go | 21 - vendor/github.com/gorilla/websocket/util.go | 19 +- .../gorilla/websocket/x_net_proxy.go | 473 - .../klauspost/compress/flate/deflate.go | 1017 ++ .../klauspost/compress/flate/dict_decoder.go | 184 + .../klauspost/compress/flate/fast_encoder.go | 193 + .../compress/flate/huffman_bit_writer.go | 1182 ++ .../klauspost/compress/flate/huffman_code.go | 417 + .../compress/flate/huffman_sortByFreq.go | 159 + .../compress/flate/huffman_sortByLiteral.go | 201 + .../klauspost/compress/flate/inflate.go | 829 ++ .../klauspost/compress/flate/inflate_gen.go | 1283 ++ .../klauspost/compress/flate/level1.go | 241 + .../klauspost/compress/flate/level2.go | 214 + .../klauspost/compress/flate/level3.go | 241 + .../klauspost/compress/flate/level4.go | 221 + .../klauspost/compress/flate/level5.go | 708 + .../klauspost/compress/flate/level6.go | 325 + .../compress/flate/matchlen_amd64.go | 16 + .../klauspost/compress/flate/matchlen_amd64.s | 68 + .../compress/flate/matchlen_generic.go | 33 + .../klauspost/compress/flate/regmask_amd64.go | 37 + .../klauspost/compress/flate/regmask_other.go | 40 + .../klauspost/compress/flate/stateless.go | 318 + .../klauspost/compress/flate/token.go | 379 + .../klauspost/compress/gzip/gunzip.go | 380 + .../klauspost/compress/gzip/gzip.go | 290 + .../manifestival/manifestival/manifestival.go | 10 +- ...ersion-operator_01_clusterversion.crd.yaml | 15 +- .../0000_03_config-operator_01_proxy.crd.yaml | 2 +- ...rketplace-operator_01_operatorhub.crd.yaml | 2 +- ...0_10_config-operator_01_apiserver.crd.yaml | 16 +- .../0000_10_config-operator_01_build.crd.yaml | 8 +- .../0000_10_config-operator_01_dns.crd.yaml | 40 + ...erator_01_infrastructure-Default.crd.yaml} | 97 +- 
...frastructure-TechPreviewNoUpgrade.crd.yaml | 761 ++ ...ucture-TechPreviewNoUpgrade.crd.yaml-patch | 24 + ...000_10_config-operator_01_ingress.crd.yaml | 54 +- .../0000_10_config-operator_01_oauth.crd.yaml | 8 +- .../openshift/api/config/v1/Makefile | 3 + .../config/v1/stable.apiserver.testsuite.yaml | 16 + .../v1/stable.authentication.testsuite.yaml | 14 + .../api/config/v1/stable.build.testsuite.yaml | 14 + .../v1/stable.clusteroperator.testsuite.yaml | 14 + .../v1/stable.clusterversion.testsuite.yaml | 16 + .../config/v1/stable.console.testsuite.yaml | 14 + .../api/config/v1/stable.dns.testsuite.yaml | 105 + .../v1/stable.featuregate.testsuite.yaml | 14 + .../api/config/v1/stable.image.testsuite.yaml | 14 + .../stable.imagecontentpolicy.testsuite.yaml | 14 + .../v1/stable.infrastructure.testsuite.yaml | 14 + .../config/v1/stable.ingress.testsuite.yaml | 14 + .../config/v1/stable.network.testsuite.yaml | 14 + .../api/config/v1/stable.node.testsuite.yaml | 14 + .../api/config/v1/stable.oauth.testsuite.yaml | 14 + .../v1/stable.operatorhub.testsuite.yaml | 14 + .../config/v1/stable.project.testsuite.yaml | 14 + .../api/config/v1/stable.proxy.testsuite.yaml | 14 + .../config/v1/stable.scheduler.testsuite.yaml | 14 + .../techpreview.infrastructure.testsuite.yaml | 14 + .../api/config/v1/types_cluster_version.go | 52 +- .../openshift/api/config/v1/types_dns.go | 37 + .../openshift/api/config/v1/types_feature.go | 28 +- .../v1/types_image_digest_mirror_set.go | 5 + .../config/v1/types_image_tag_mirror_set.go | 5 + .../api/config/v1/types_infrastructure.go | 322 +- .../openshift/api/config/v1/types_ingress.go | 76 + .../api/config/v1/zz_generated.deepcopy.go | 307 +- .../v1/zz_generated.swagger_doc_generated.go | 174 +- ...ig-operator_01_insightsdatagather.crd.yaml | 62 + .../openshift/api/config/v1alpha1/Makefile | 3 + .../openshift/api/config/v1alpha1/doc.go | 8 + .../openshift/api/config/v1alpha1/register.go | 38 + ...hpreview.insightsdatagather.testsuite.yaml | 14 + .../api/config/v1alpha1/types_insights.go | 76 + .../config/v1alpha1/zz_generated.deepcopy.go | 125 + .../zz_generated.swagger_doc_generated.go | 50 + .../v1/0000_10_consoleclidownload.crd.yaml | 1 + .../0000_10_consoleexternalloglink.crd.yaml | 3 +- .../console/v1/0000_10_consolelink.crd.yaml | 2 + .../v1/0000_10_consolenotification.crd.yaml | 1 + .../v1/0000_10_consolequickstart.crd.yaml | 1 + .../v1/0000_10_consoleyamlsample.crd.yaml | 1 + .../console/v1/0000_51_consoleplugin.crd.yaml | 294 + .../openshift/api/console/v1/Makefile | 3 + .../openshift/api/console/v1/register.go | 2 + .../stable.consoleclidownload.testsuite.yaml | 20 + ...able.consoleexternalloglink.testsuite.yaml | 18 + .../v1/stable.consolelink.testsuite.yaml | 20 + .../stable.consolenotification.testsuite.yaml | 16 + .../v1/stable.consoleplugin.testsuite.yaml | 88 + .../stable.consolequickstart.testsuite.yaml | 28 + .../stable.consoleyamlsample.testsuite.yaml | 26 + .../api/console/v1/types_console_plugin.go | 238 + .../api/console/v1/zz_generated.deepcopy.go | 192 + .../v1/zz_generated.swagger_doc_generated.go | 92 + .../v1alpha1/0000_10_consoleplugin.crd.yaml | 132 - .../v1alpha1/0000_51_consoleplugin.crd.yaml | 294 + .../openshift/api/console/v1alpha1/Makefile | 3 + .../stable.consoleplugin.testsuite.yaml | 23 + .../openshift/api/route/v1/Makefile | 3 + .../openshift/api/route/v1/generated.proto | 26 +- .../openshift/api/route/v1/route.crd.yaml | 298 + .../api/route/v1/route.crd.yaml-patch | 86 + .../api/route/v1/stable.route.testsuite.yaml | 22 + 
.../api/route/v1/test-route-validation.sh | 476 + .../openshift/api/route/v1/types.go | 28 +- .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../config/v1/alibabacloudplatformstatus.go | 46 + .../config/v1/alibabacloudresourcetag.go | 32 + .../config/v1/apiserver.go | 240 + .../config/v1/apiserverencryption.go | 27 + .../config/v1/apiservernamedservingcert.go | 34 + .../config/v1/apiserverservingcerts.go | 28 + .../config/v1/apiserverspec.go | 70 + .../applyconfigurations/config/v1/audit.go | 41 + .../config/v1/auditcustomrule.go | 36 + .../config/v1/authentication.go | 240 + .../config/v1/authenticationspec.go | 68 + .../config/v1/authenticationstatus.go | 23 + .../config/v1/awsingressspec.go | 27 + .../config/v1/awsplatformspec.go | 28 + .../config/v1/awsplatformstatus.go | 51 + .../config/v1/awsresourcetag.go | 32 + .../config/v1/awsserviceendpoint.go | 32 + .../config/v1/azureplatformstatus.go | 54 + .../config/v1/baremetalplatformstatus.go | 63 + .../config/v1/basicauthidentityprovider.go | 47 + .../applyconfigurations/config/v1/build.go | 231 + .../config/v1/builddefaults.go | 70 + .../config/v1/buildoverrides.go | 67 + .../config/v1/buildspec.go | 41 + .../config/v1/clustercondition.go | 32 + .../config/v1/clusternetworkentry.go | 32 + .../config/v1/clusteroperator.go | 240 + .../config/v1/clusteroperatorstatus.go | 69 + .../v1/clusteroperatorstatuscondition.go | 64 + .../config/v1/clusterversion.go | 240 + .../v1/clusterversioncapabilitiesspec.go | 38 + .../v1/clusterversioncapabilitiesstatus.go | 40 + .../config/v1/clusterversionspec.go | 77 + .../config/v1/clusterversionstatus.go | 106 + .../config/v1/componentoverride.go | 59 + .../config/v1/componentroutespec.go | 54 + .../config/v1/componentroutestatus.go | 93 + .../config/v1/conditionalupdate.go | 52 + .../config/v1/conditionalupdaterisk.go | 55 + .../config/v1/configmapfilereference.go | 32 + .../config/v1/configmapnamereference.go | 23 + .../applyconfigurations/config/v1/console.go | 240 + .../config/v1/consoleauthentication.go | 23 + .../config/v1/consolespec.go | 23 + .../config/v1/consolestatus.go | 23 + .../config/v1/customfeaturegates.go | 36 + .../config/v1/customtlsprofile.go | 37 + .../v1/deprecatedwebhooktokenauthenticator.go | 23 + .../applyconfigurations/config/v1/dns.go | 240 + .../applyconfigurations/config/v1/dnsspec.go | 41 + .../applyconfigurations/config/v1/dnszone.go | 38 + .../config/v1/equinixmetalplatformstatus.go | 32 + .../config/v1/externalipconfig.go | 34 + .../config/v1/externalippolicy.go | 36 + .../config/v1/featuregate.go | 240 + .../config/v1/featuregateselection.go | 36 + .../config/v1/featuregatespec.go | 35 + .../config/v1/gcpplatformstatus.go | 32 + .../config/v1/githubidentityprovider.go | 72 + .../config/v1/gitlabidentityprovider.go | 50 + .../config/v1/googleidentityprovider.go | 41 + .../config/v1/htpasswdidentityprovider.go | 23 + .../config/v1/hubsource.go | 32 + .../config/v1/hubsourcestatus.go | 57 + .../config/v1/ibmcloudplatformstatus.go | 63 + .../config/v1/identityprovider.go | 117 + .../config/v1/identityproviderconfig.go | 108 + .../applyconfigurations/config/v1/image.go | 240 + .../config/v1/imagecontentpolicy.go | 231 + .../config/v1/imagecontentpolicyspec.go | 28 + .../config/v1/imagedigestmirrors.go | 47 + .../config/v1/imagedigestmirrorset.go | 240 + .../config/v1/imagedigestmirrorsetspec.go | 28 + .../config/v1/imagelabel.go | 32 + .../config/v1/imagespec.go | 57 + .../config/v1/imagestatus.go | 34 + .../config/v1/imagetagmirrors.go | 47 + 
.../config/v1/imagetagmirrorset.go | 240 + .../config/v1/imagetagmirrorsetspec.go | 28 + .../config/v1/infrastructure.go | 240 + .../config/v1/infrastructurespec.go | 32 + .../config/v1/infrastructurestatus.go | 90 + .../applyconfigurations/config/v1/ingress.go | 240 + .../config/v1/ingressplatformspec.go | 36 + .../config/v1/ingressspec.go | 69 + .../config/v1/ingressstatus.go | 41 + .../config/v1/keystoneidentityprovider.go | 56 + .../config/v1/kubevirtplatformstatus.go | 32 + .../config/v1/ldapattributemapping.go | 58 + .../config/v1/ldapidentityprovider.go | 68 + .../config/v1/loadbalancer.go | 23 + .../config/v1/maxagepolicy.go | 32 + .../config/v1/mtumigration.go | 32 + .../config/v1/mtumigrationvalues.go | 32 + .../applyconfigurations/config/v1/network.go | 240 + .../config/v1/networkmigration.go | 32 + .../config/v1/networkspec.go | 66 + .../config/v1/networkstatus.go | 66 + .../applyconfigurations/config/v1/node.go | 240 + .../applyconfigurations/config/v1/nodespec.go | 36 + .../config/v1/nutanixplatformspec.go | 37 + .../config/v1/nutanixplatformstatus.go | 54 + .../config/v1/nutanixprismelementendpoint.go | 32 + .../config/v1/nutanixprismendpoint.go | 32 + .../applyconfigurations/config/v1/oauth.go | 240 + .../config/v1/oauthremoteconnectioninfo.go | 50 + .../config/v1/oauthspec.go | 46 + .../config/v1/oauthtemplates.go | 41 + .../config/v1/objectreference.go | 50 + .../config/v1/openidclaims.go | 62 + .../config/v1/openididentityprovider.go | 85 + .../config/v1/openstackplatformstatus.go | 72 + .../config/v1/operandversion.go | 32 + .../config/v1/operatorhub.go | 240 + .../config/v1/operatorhubspec.go | 37 + .../config/v1/operatorhubstatus.go | 28 + .../config/v1/ovirtplatformstatus.go | 63 + .../config/v1/platformspec.go | 144 + .../config/v1/platformstatus.go | 144 + .../config/v1/powervsplatformspec.go | 28 + .../config/v1/powervsplatformstatus.go | 64 + .../config/v1/powervsserviceendpoint.go | 32 + .../applyconfigurations/config/v1/project.go | 240 + .../config/v1/projectspec.go | 32 + .../config/v1/promqlclustercondition.go | 23 + .../applyconfigurations/config/v1/proxy.go | 240 + .../config/v1/proxyspec.go | 61 + .../config/v1/proxystatus.go | 41 + .../config/v1/registrylocation.go | 32 + .../config/v1/registrysources.go | 58 + .../applyconfigurations/config/v1/release.go | 56 + .../config/v1/repositorydigestmirrors.go | 47 + .../v1/requestheaderidentityprovider.go | 96 + .../config/v1/requiredhstspolicy.go | 66 + .../config/v1/scheduler.go | 240 + .../config/v1/schedulerspec.go | 54 + .../config/v1/secretnamereference.go | 23 + .../config/v1/templatereference.go | 23 + .../config/v1/tlsprofilespec.go | 38 + .../config/v1/tlssecurityprofile.go | 63 + .../config/v1/tokenconfig.go | 45 + .../applyconfigurations/config/v1/update.go | 41 + .../config/v1/updatehistory.go | 82 + .../v1/vsphereplatformfailuredomainspec.go | 59 + .../v1/vsphereplatformnodenetworking.go | 32 + .../v1/vsphereplatformnodenetworkingspec.go | 45 + .../config/v1/vsphereplatformspec.go | 51 + .../config/v1/vsphereplatformstatus.go | 63 + .../config/v1/vsphereplatformtopology.go | 70 + .../config/v1/vsphereplatformvcenterspec.go | 43 + .../config/v1/webhooktokenauthenticator.go | 23 + .../applyconfigurations/internal/internal.go | 3268 +++++ .../clientset/versioned/scheme/register.go | 16 +- .../versioned/typed/config/v1/apiserver.go | 59 + .../typed/config/v1/authentication.go | 59 + .../versioned/typed/config/v1/build.go | 29 + .../typed/config/v1/clusteroperator.go | 59 + 
.../typed/config/v1/clusterversion.go | 59 + .../versioned/typed/config/v1/console.go | 59 + .../versioned/typed/config/v1/dns.go | 59 + .../versioned/typed/config/v1/featuregate.go | 59 + .../versioned/typed/config/v1/image.go | 59 + .../typed/config/v1/imagecontentpolicy.go | 29 + .../typed/config/v1/imagedigestmirrorset.go | 75 + .../typed/config/v1/imagetagmirrorset.go | 75 + .../typed/config/v1/infrastructure.go | 59 + .../versioned/typed/config/v1/ingress.go | 59 + .../versioned/typed/config/v1/network.go | 59 + .../versioned/typed/config/v1/node.go | 59 + .../versioned/typed/config/v1/oauth.go | 59 + .../versioned/typed/config/v1/operatorhub.go | 59 + .../versioned/typed/config/v1/project.go | 59 + .../versioned/typed/config/v1/proxy.go | 59 + .../versioned/typed/config/v1/scheduler.go | 59 + .../console/v1/applicationmenuspec.go | 32 + .../console/v1/clidownloadlink.go | 32 + .../console/v1/consoleclidownload.go | 231 + .../console/v1/consoleclidownloadspec.go | 46 + .../console/v1/consoleexternalloglink.go | 231 + .../console/v1/consoleexternalloglinkspec.go | 41 + .../console/v1/consolelink.go | 231 + .../console/v1/consolelinkspec.go | 62 + .../console/v1/consolenotification.go | 231 + .../console/v1/consolenotificationspec.go | 63 + .../console/v1/consoleplugin.go | 231 + .../console/v1/consolepluginbackend.go | 36 + .../console/v1/consoleplugini18n.go | 27 + .../console/v1/consolepluginproxy.go | 54 + .../console/v1/consolepluginproxyendpoint.go | 36 + .../v1/consolepluginproxyserviceconfig.go | 41 + .../console/v1/consolepluginservice.go | 50 + .../console/v1/consolepluginspec.go | 55 + .../console/v1/consolequickstart.go | 231 + .../console/v1/consolequickstartspec.go | 130 + .../console/v1/consolequickstarttask.go | 50 + .../console/v1/consolequickstarttaskreview.go | 32 + .../v1/consolequickstarttasksummary.go | 32 + .../console/v1/consoleyamlsample.go | 231 + .../console/v1/consoleyamlsamplespec.go | 64 + .../applyconfigurations/console/v1/link.go | 32 + .../console/v1/namespacedashboardspec.go | 38 + .../applyconfigurations/internal/internal.go | 787 ++ .../clientset/versioned/scheme/register.go | 14 +- .../typed/console/v1/console_client.go | 5 + .../typed/console/v1/consoleclidownload.go | 29 + .../console/v1/consoleexternalloglink.go | 29 + .../versioned/typed/console/v1/consolelink.go | 29 + .../typed/console/v1/consolenotification.go | 29 + .../typed/console/v1/consoleplugin.go | 181 + .../typed/console/v1/consolequickstart.go | 29 + .../typed/console/v1/consoleyamlsample.go | 29 + .../typed/console/v1/generated_expansion.go | 2 + .../applyconfigurations/internal/internal.go | 321 + .../applyconfigurations/route/v1/route.go | 242 + .../route/v1/routeingress.go | 68 + .../route/v1/routeingresscondition.go | 65 + .../applyconfigurations/route/v1/routeport.go | 27 + .../applyconfigurations/route/v1/routespec.go | 95 + .../route/v1/routestatus.go | 28 + .../route/v1/routetargetreference.go | 41 + .../applyconfigurations/route/v1/tlsconfig.go | 72 + .../clientset/versioned/scheme/register.go | 14 +- .../versioned/typed/route/v1/route.go | 61 + .../v1/helpers.go | 2 +- .../v1/types.go | 28 +- .../clientset/versioned/scheme/register.go | 14 +- vendor/github.com/rs/dnscache/.travis.yml | 13 + vendor/github.com/rs/dnscache/LICENSE | 21 + vendor/github.com/rs/dnscache/README.md | 78 + vendor/github.com/rs/dnscache/dnscache.go | 275 + vendor/github.com/tsenart/vegeta/v12/LICENSE | 2 +- .../tsenart/vegeta/v12/lib/attack.go | 217 +- .../tsenart/vegeta/v12/lib/attack_fuzz.go | 6 
+- .../tsenart/vegeta/v12/lib/pacer.go | 80 +- .../vegeta/v12/lib/results_easyjson.go | 13 +- .../tsenart/vegeta/v12/lib/results_fuzz.go | 1 + .../tsenart/vegeta/v12/lib/target.schema.json | 12 +- .../tsenart/vegeta/v12/lib/targets.go | 39 +- .../tsenart/vegeta/v12/lib/targets_fuzz.go | 1 + .../tsenart/vegeta/v12/lib/util_fuzz.go | 1 + .../golang => golang.org/x/exp}/LICENSE | 0 .../golang => golang.org/x/exp}/PATENTS | 0 vendor/golang.org/x/exp/maps/maps.go | 94 + .../x/sync/singleflight/singleflight.go | 214 + .../x/tools/go/gcexportdata/gcexportdata.go | 186 - .../x/tools/go/gcexportdata/importer.go | 75 - .../tools/go/internal/packagesdriver/sizes.go | 53 - vendor/golang.org/x/tools/go/packages/doc.go | 240 - .../x/tools/go/packages/external.go | 101 - .../golang.org/x/tools/go/packages/golist.go | 1107 -- .../x/tools/go/packages/golist_overlay.go | 83 - .../x/tools/go/packages/loadmode_string.go | 57 - .../x/tools/go/packages/packages.go | 1347 -- .../golang.org/x/tools/go/packages/visit.go | 59 - .../x/tools/go/types/objectpath/objectpath.go | 752 -- .../x/tools/internal/gcimporter/bimport.go | 150 - .../x/tools/internal/gcimporter/exportdata.go | 99 - .../x/tools/internal/gcimporter/gcimporter.go | 273 - .../x/tools/internal/gcimporter/iexport.go | 1321 -- .../x/tools/internal/gcimporter/iimport.go | 1082 -- .../internal/gcimporter/newInterface10.go | 22 - .../internal/gcimporter/newInterface11.go | 14 - .../internal/gcimporter/support_go117.go | 16 - .../internal/gcimporter/support_go118.go | 37 - .../x/tools/internal/gcimporter/unified_no.go | 10 - .../tools/internal/gcimporter/unified_yes.go | 10 - .../x/tools/internal/gcimporter/ureader_no.go | 19 - .../tools/internal/gcimporter/ureader_yes.go | 728 - .../internal/packagesinternal/packages.go | 22 - .../x/tools/internal/pkgbits/codes.go | 77 - .../x/tools/internal/pkgbits/decoder.go | 517 - .../x/tools/internal/pkgbits/doc.go | 32 - .../x/tools/internal/pkgbits/encoder.go | 383 - .../x/tools/internal/pkgbits/flags.go | 9 - .../x/tools/internal/pkgbits/frames_go1.go | 21 - .../x/tools/internal/pkgbits/frames_go17.go | 28 - .../x/tools/internal/pkgbits/reloc.go | 42 - .../x/tools/internal/pkgbits/support.go | 17 - .../x/tools/internal/pkgbits/sync.go | 113 - .../internal/pkgbits/syncmarker_string.go | 89 - .../internal/tokeninternal/tokeninternal.go | 151 - .../x/tools/internal/typeparams/common.go | 204 - .../x/tools/internal/typeparams/coretype.go | 122 - .../x/tools/internal/typeparams/normalize.go | 218 - .../x/tools/internal/typeparams/termlist.go | 163 - .../x/tools/internal/typeparams/typeterm.go | 169 - .../tools/internal/typesinternal/errorcode.go | 1560 --- .../typesinternal/errorcode_string.go | 179 - .../x/tools/internal/typesinternal/types.go | 52 - .../tools/internal/typesinternal/types_118.go | 19 - .../x/tools/internal/versions/gover.go | 172 - .../x/tools/internal/versions/types.go | 19 - .../x/tools/internal/versions/types_go121.go | 20 - .../x/tools/internal/versions/types_go122.go | 24 - .../tools/internal/versions/versions_go121.go | 49 - .../tools/internal/versions/versions_go122.go | 38 - .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 4 + vendor/istio.io/api/LICENSE | 2 +- .../api/analysis/v1alpha1/message.pb.go | 2 +- .../api/analysis/v1alpha1/message.pb.html | 6 +- .../analysis/v1alpha1/message_deepcopy.gen.go | 2 +- .../api/extensions/v1alpha1/wasm.gen.json | 47 +- .../api/extensions/v1alpha1/wasm.pb.go | 282 +- .../api/extensions/v1alpha1/wasm.pb.html | 135 +- .../api/extensions/v1alpha1/wasm.proto | 91 
+- .../extensions/v1alpha1/wasm_deepcopy.gen.go | 23 +- .../api/extensions/v1alpha1/wasm_json.gen.go | 11 + .../istio.io/api/meta/v1alpha1/status.pb.go | 2 +- .../istio.io/api/meta/v1alpha1/status.pb.html | 4 +- .../api/meta/v1alpha1/status_deepcopy.gen.go | 2 +- .../v1alpha3/destination_rule.gen.json | 259 +- .../v1alpha3/destination_rule.pb.go | 1495 ++- .../v1alpha3/destination_rule.pb.html | 335 +- .../v1alpha3/destination_rule.proto | 155 +- .../v1alpha3/destination_rule_deepcopy.gen.go | 44 +- .../v1alpha3/destination_rule_json.gen.go | 22 + .../networking/v1alpha3/envoy_filter.gen.json | 7 +- .../networking/v1alpha3/envoy_filter.pb.go | 455 +- .../networking/v1alpha3/envoy_filter.pb.html | 99 +- .../networking/v1alpha3/envoy_filter.proto | 65 +- .../v1alpha3/envoy_filter_deepcopy.gen.go | 2 +- .../api/networking/v1alpha3/gateway.gen.json | 13 +- .../api/networking/v1alpha3/gateway.pb.go | 395 +- .../api/networking/v1alpha3/gateway.pb.html | 113 +- .../api/networking/v1alpha3/gateway.proto | 52 +- .../v1alpha3/gateway_deepcopy.gen.go | 2 +- .../v1alpha3/service_entry.gen.json | 46 +- .../networking/v1alpha3/service_entry.pb.go | 241 +- .../networking/v1alpha3/service_entry.pb.html | 197 +- .../networking/v1alpha3/service_entry.proto | 42 +- .../v1alpha3/service_entry_deepcopy.gen.go | 23 +- .../v1alpha3/service_entry_json.gen.go | 11 + .../api/networking/v1alpha3/sidecar.gen.json | 19 +- .../api/networking/v1alpha3/sidecar.pb.go | 131 +- .../api/networking/v1alpha3/sidecar.pb.html | 87 +- .../api/networking/v1alpha3/sidecar.proto | 17 +- .../v1alpha3/sidecar_deepcopy.gen.go | 2 +- .../v1alpha3/virtual_service.gen.json | 83 +- .../networking/v1alpha3/virtual_service.pb.go | 2676 ++-- .../v1alpha3/virtual_service.pb.html | 557 +- .../networking/v1alpha3/virtual_service.proto | 221 +- .../v1alpha3/virtual_service_deepcopy.gen.go | 44 +- .../v1alpha3/virtual_service_json.gen.go | 22 + .../networking/v1alpha3/workload_entry.pb.go | 64 +- .../v1alpha3/workload_entry.pb.html | 31 - .../v1alpha3/workload_entry_deepcopy.gen.go | 2 +- .../networking/v1alpha3/workload_group.pb.go | 167 +- .../v1alpha3/workload_group.pb.html | 4 - .../v1alpha3/workload_group_deepcopy.gen.go | 2 +- .../v1beta1/destination_rule.gen.json | 259 +- .../networking/v1beta1/destination_rule.pb.go | 1444 +- .../networking/v1beta1/destination_rule.proto | 100 +- .../v1beta1/destination_rule_deepcopy.gen.go | 44 +- .../v1beta1/destination_rule_json.gen.go | 22 + .../api/networking/v1beta1/gateway.gen.json | 13 +- .../api/networking/v1beta1/gateway.pb.go | 400 +- .../api/networking/v1beta1/gateway.proto | 52 +- .../v1beta1/gateway_deepcopy.gen.go | 2 +- .../networking/v1beta1/proxy_config.gen.json | 2 +- .../api/networking/v1beta1/proxy_config.pb.go | 12 +- .../networking/v1beta1/proxy_config.pb.html | 20 +- .../api/networking/v1beta1/proxy_config.proto | 10 +- .../v1beta1/proxy_config_deepcopy.gen.go | 2 +- .../networking/v1beta1/service_entry.gen.json | 46 +- .../networking/v1beta1/service_entry.pb.go | 240 +- .../networking/v1beta1/service_entry.proto | 42 +- .../v1beta1/service_entry_deepcopy.gen.go | 23 +- .../v1beta1/service_entry_json.gen.go | 11 + .../api/networking/v1beta1/sidecar.gen.json | 19 +- .../api/networking/v1beta1/sidecar.pb.go | 143 +- .../api/networking/v1beta1/sidecar.proto | 17 +- .../v1beta1/sidecar_deepcopy.gen.go | 2 +- .../v1beta1/virtual_service.gen.json | 83 +- .../networking/v1beta1/virtual_service.pb.go | 2656 ++-- .../networking/v1beta1/virtual_service.proto | 221 +- 
.../v1beta1/virtual_service_deepcopy.gen.go | 44 +- .../v1beta1/virtual_service_json.gen.go | 22 + .../networking/v1beta1/workload_entry.pb.go | 64 +- .../v1beta1/workload_entry_deepcopy.gen.go | 2 +- .../networking/v1beta1/workload_group.pb.go | 169 +- .../networking/v1beta1/workload_group.pb.html | 4 - .../v1beta1/workload_group_deepcopy.gen.go | 2 +- .../security/v1/authorization_policy.pb.go | 1349 ++ .../security/v1/authorization_policy.pb.html | 802 ++ .../security/v1/authorization_policy.proto | 516 + .../v1/authorization_policy_deepcopy.gen.go | 174 + .../v1/authorization_policy_json.gen.go | 100 + vendor/istio.io/api/security/v1/jwt.pb.go | 522 + vendor/istio.io/api/security/v1/jwt.pb.html | 276 + vendor/istio.io/api/security/v1/jwt.proto | 186 + .../api/security/v1/jwt_deepcopy.gen.go | 69 + .../istio.io/api/security/v1/jwt_json.gen.go | 45 + .../security/v1/request_authentication.pb.go | 458 + .../v1/request_authentication.pb.html | 247 + .../security/v1/request_authentication.proto | 262 + .../v1/request_authentication_deepcopy.gen.go | 27 + .../v1/request_authentication_json.gen.go | 23 + .../v1beta1/authorization_policy.gen.json | 8 +- .../v1beta1/authorization_policy.pb.go | 155 +- .../v1beta1/authorization_policy.pb.html | 169 +- .../v1beta1/authorization_policy.proto | 132 +- .../authorization_policy_deepcopy.gen.go | 2 +- .../api/security/v1beta1/jwt.gen.json | 31 +- .../istio.io/api/security/v1beta1/jwt.pb.go | 221 +- .../istio.io/api/security/v1beta1/jwt.pb.html | 95 +- .../istio.io/api/security/v1beta1/jwt.proto | 41 +- .../api/security/v1beta1/jwt_deepcopy.gen.go | 23 +- .../api/security/v1beta1/jwt_json.gen.go | 11 + .../v1beta1/peer_authentication.pb.go | 92 +- .../v1beta1/peer_authentication.pb.html | 12 +- .../v1beta1/peer_authentication.proto | 2 +- .../peer_authentication_deepcopy.gen.go | 2 +- .../v1beta1/request_authentication.gen.json | 31 +- .../v1beta1/request_authentication.pb.go | 251 +- .../v1beta1/request_authentication.pb.html | 26 +- .../v1beta1/request_authentication.proto | 15 +- .../request_authentication_deepcopy.gen.go | 2 +- .../api/telemetry/v1alpha1/telemetry.pb.go | 291 +- .../api/telemetry/v1alpha1/telemetry.pb.html | 112 +- .../api/telemetry/v1alpha1/telemetry.proto | 28 +- .../v1alpha1/telemetry_deepcopy.gen.go | 2 +- .../api/type/v1beta1/selector.gen.json | 20 + .../istio.io/api/type/v1beta1/selector.pb.go | 174 +- .../api/type/v1beta1/selector.pb.html | 83 +- .../istio.io/api/type/v1beta1/selector.proto | 31 + .../api/type/v1beta1/selector_deepcopy.gen.go | 23 +- .../api/type/v1beta1/selector_json.gen.go | 11 + vendor/istio.io/client-go/LICENSE | 2 +- .../pkg/apis/extensions/v1alpha1/types.gen.go | 2 +- .../v1alpha1/zz_generated.deepcopy.gen.go | 8 +- .../pkg/apis/networking/v1alpha3/types.gen.go | 22 +- .../v1alpha3/zz_generated.deepcopy.gen.go | 64 +- .../pkg/apis/networking/v1beta1/types.gen.go | 24 +- .../v1beta1/zz_generated.deepcopy.gen.go | 64 +- .../client-go/pkg/apis/security/v1/doc.go | 21 + .../pkg/apis/security/v1/register.gen.go | 51 + .../pkg/apis/security/v1/types.gen.go | 352 + .../security/v1/zz_generated.deepcopy.gen.go | 154 + .../pkg/apis/security/v1beta1/types.gen.go | 350 +- .../v1beta1/zz_generated.deepcopy.gen.go | 24 +- .../pkg/apis/telemetry/v1alpha1/types.gen.go | 12 +- .../v1alpha1/zz_generated.deepcopy.gen.go | 8 +- .../versioned/scheme/register.gen.go | 16 +- .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../admissionregistration/v1/generated.pb.go | 484 +- 
.../admissionregistration/v1/generated.proto | 77 + .../api/admissionregistration/v1/types.go | 77 + .../v1/types_swagger_doc_generated.go | 14 +- .../v1/zz_generated.deepcopy.go | 26 + .../api/admissionregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 4634 +++++++ .../v1alpha1/generated.proto | 609 + .../v1alpha1/register.go | 56 + .../admissionregistration/v1alpha1/types.go | 665 + .../v1alpha1/types_swagger_doc_generated.go | 204 + .../v1alpha1/zz_generated.deepcopy.go | 475 + .../v1beta1/generated.pb.go | 6119 +++++++-- .../v1beta1/generated.proto | 669 +- .../admissionregistration/v1beta1/register.go | 4 + .../admissionregistration/v1beta1/types.go | 737 +- .../v1beta1/types_swagger_doc_generated.go | 193 +- .../v1beta1/zz_generated.deepcopy.go | 463 +- .../zz_generated.prerelease-lifecycle.go | 72 + vendor/k8s.io/api/apidiscovery/v2beta1/doc.go | 24 + .../api/apidiscovery/v2beta1/generated.pb.go | 1744 +++ .../api/apidiscovery/v2beta1/generated.proto | 156 + .../api/apidiscovery/v2beta1/register.go | 56 + .../k8s.io/api/apidiscovery/v2beta1/types.go | 163 + .../v2beta1/zz_generated.deepcopy.go | 190 + .../zz_generated.prerelease-lifecycle.go | 58 + .../v1alpha1/generated.pb.go | 148 +- .../v1alpha1/generated.proto | 5 + .../api/apiserverinternal/v1alpha1/types.go | 5 + .../v1alpha1/types_swagger_doc_generated.go | 3 +- .../v1alpha1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/apps/v1/generated.pb.go | 481 +- vendor/k8s.io/api/apps/v1/generated.proto | 32 +- vendor/k8s.io/api/apps/v1/types.go | 35 +- .../apps/v1/types_swagger_doc_generated.go | 20 +- .../api/apps/v1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 459 +- .../k8s.io/api/apps/v1beta1/generated.proto | 82 +- vendor/k8s.io/api/apps/v1beta1/types.go | 82 +- .../v1beta1/types_swagger_doc_generated.go | 70 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 498 +- .../k8s.io/api/apps/v1beta2/generated.proto | 34 +- vendor/k8s.io/api/apps/v1beta2/types.go | 34 +- .../v1beta2/types_swagger_doc_generated.go | 22 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 21 + .../api/authentication/v1/generated.pb.go | 511 +- .../api/authentication/v1/generated.proto | 20 + .../k8s.io/api/authentication/v1/register.go | 1 + vendor/k8s.io/api/authentication/v1/types.go | 25 + .../v1/types_swagger_doc_generated.go | 21 +- .../v1/zz_generated.deepcopy.go | 44 + .../k8s.io/api/authentication/v1alpha1/doc.go | 23 + .../authentication/v1alpha1/generated.pb.go | 567 + .../authentication/v1alpha1/generated.proto | 51 + .../api/authentication/v1alpha1/register.go | 51 + .../api/authentication/v1alpha1/types.go | 48 + .../v1alpha1/types_swagger_doc_generated.go | 49 + .../v1alpha1/zz_generated.deepcopy.go | 70 + .../zz_generated.prerelease-lifecycle.go | 40 + .../authentication/v1beta1/generated.pb.go | 476 +- .../authentication/v1beta1/generated.proto | 21 + .../api/authentication/v1beta1/register.go | 1 + .../api/authentication/v1beta1/types.go | 27 + .../v1beta1/types_swagger_doc_generated.go | 21 +- .../v1beta1/zz_generated.deepcopy.go | 44 + .../zz_generated.prerelease-lifecycle.go | 18 + .../v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 42 +- vendor/k8s.io/api/autoscaling/v1/types.go | 79 +- .../v1/types_swagger_doc_generated.go | 42 +- .../k8s.io/api/autoscaling/v2/generated.proto | 20 +- vendor/k8s.io/api/autoscaling/v2/types.go | 61 +- 
.../v2/types_swagger_doc_generated.go | 22 +- .../api/autoscaling/v2beta1/generated.proto | 4 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 4 +- .../v2beta1/types_swagger_doc_generated.go | 6 +- .../api/autoscaling/v2beta2/generated.proto | 24 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 62 +- .../v2beta2/types_swagger_doc_generated.go | 26 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 398 +- vendor/k8s.io/api/batch/v1/generated.proto | 102 +- vendor/k8s.io/api/batch/v1/types.go | 180 +- .../batch/v1/types_swagger_doc_generated.go | 37 +- .../api/batch/v1/zz_generated.deepcopy.go | 25 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 317 +- .../k8s.io/api/batch/v1beta1/generated.proto | 15 +- vendor/k8s.io/api/batch/v1beta1/register.go | 1 - vendor/k8s.io/api/batch/v1beta1/types.go | 20 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 27 - .../zz_generated.prerelease-lifecycle.go | 18 - vendor/k8s.io/api/certificates/v1/types.go | 3 +- .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/certificates/v1alpha1/doc.go | 24 + .../api/certificates/v1alpha1/generated.pb.go | 831 ++ .../api/certificates/v1alpha1/generated.proto | 103 + .../api/certificates/v1alpha1/register.go | 61 + .../k8s.io/api/certificates/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 60 + .../v1alpha1/zz_generated.deepcopy.go | 102 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/certificates/v1beta1/generated.proto | 6 +- .../k8s.io/api/certificates/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../api/coordination/v1/generated.proto | 6 +- vendor/k8s.io/api/coordination/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 8 +- .../api/coordination/v1beta1/generated.proto | 6 +- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 8 +- .../api/core/v1/annotation_key_constants.go | 27 +- vendor/k8s.io/api/core/v1/generated.pb.go | 7651 ++++++++--- vendor/k8s.io/api/core/v1/generated.proto | 655 +- vendor/k8s.io/api/core/v1/toleration.go | 14 +- vendor/k8s.io/api/core/v1/types.go | 832 +- .../core/v1/types_swagger_doc_generated.go | 260 +- .../k8s.io/api/core/v1/well_known_labels.go | 4 + .../api/core/v1/zz_generated.deepcopy.go | 376 +- .../k8s.io/api/discovery/v1/generated.proto | 34 +- vendor/k8s.io/api/discovery/v1/types.go | 50 +- .../v1/types_swagger_doc_generated.go | 18 +- .../api/discovery/v1beta1/generated.proto | 19 +- vendor/k8s.io/api/discovery/v1beta1/types.go | 36 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../events/v1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 11064 ++++++---------- .../api/extensions/v1beta1/generated.proto | 363 +- .../k8s.io/api/extensions/v1beta1/register.go | 2 - vendor/k8s.io/api/extensions/v1beta1/types.go | 492 +- .../v1beta1/types_swagger_doc_generated.go | 213 +- .../v1beta1/zz_generated.deepcopy.go | 457 +- .../zz_generated.prerelease-lifecycle.go | 48 - vendor/k8s.io/api/flowcontrol/v1/doc.go | 24 + .../{v1alpha1 => v1}/generated.pb.go | 607 +- .../k8s.io/api/flowcontrol/v1/generated.proto | 520 + .../flowcontrol/{v1alpha1 => v1}/register.go | 6 +- .../api/flowcontrol/{v1alpha1 => v1}/types.go | 181 +- .../v1/types_swagger_doc_generated.go | 274 + .../flowcontrol/v1/zz_generated.deepcopy.go | 588 + .../api/flowcontrol/v1beta1/generated.pb.go | 530 +- .../api/flowcontrol/v1beta1/generated.proto | 73 +- 
.../k8s.io/api/flowcontrol/v1beta1/types.go | 88 +- .../v1beta1/types_swagger_doc_generated.go | 17 +- .../v1beta1/zz_generated.deepcopy.go | 41 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/flowcontrol/v1beta2/generated.pb.go | 531 +- .../api/flowcontrol/v1beta2/generated.proto | 73 +- .../k8s.io/api/flowcontrol/v1beta2/types.go | 84 +- .../v1beta2/types_swagger_doc_generated.go | 17 +- .../v1beta2/zz_generated.deepcopy.go | 41 + .../zz_generated.prerelease-lifecycle.go | 28 + .../flowcontrol/{v1alpha1 => v1beta3}/doc.go | 6 +- .../api/flowcontrol/v1beta3/generated.pb.go | 5663 ++++++++ .../{v1alpha1 => v1beta3}/generated.proto | 105 +- .../api/flowcontrol/v1beta3/register.go | 58 + .../k8s.io/api/flowcontrol/v1beta3/types.go | 677 + .../types_swagger_doc_generated.go | 19 +- .../zz_generated.deepcopy.go | 43 +- .../zz_generated.prerelease-lifecycle.go | 34 +- .../k8s.io/api/networking/v1/generated.pb.go | 1316 +- .../k8s.io/api/networking/v1/generated.proto | 228 +- vendor/k8s.io/api/networking/v1/types.go | 273 +- .../v1/types_swagger_doc_generated.go | 143 +- .../networking/v1/zz_generated.deepcopy.go | 91 +- .../api/networking/v1alpha1/generated.pb.go | 1451 +- .../api/networking/v1alpha1/generated.proto | 122 +- .../api/networking/v1alpha1/register.go | 16 +- .../k8s.io/api/networking/v1alpha1/types.go | 143 +- .../v1alpha1/types_swagger_doc_generated.go | 86 +- .../networking/v1alpha1/well_known_labels.go | 33 + .../v1alpha1/zz_generated.deepcopy.go | 161 +- .../zz_generated.prerelease-lifecycle.go | 60 +- .../api/networking/v1beta1/generated.pb.go | 830 +- .../api/networking/v1beta1/generated.proto | 113 +- vendor/k8s.io/api/networking/v1beta1/types.go | 122 +- .../v1beta1/types_swagger_doc_generated.go | 89 +- .../v1beta1/zz_generated.deepcopy.go | 67 + vendor/k8s.io/api/node/v1/generated.proto | 10 +- vendor/k8s.io/api/node/v1/types.go | 12 +- .../node/v1/types_swagger_doc_generated.go | 12 +- .../k8s.io/api/node/v1alpha1/generated.proto | 14 +- vendor/k8s.io/api/node/v1alpha1/types.go | 16 +- .../v1alpha1/types_swagger_doc_generated.go | 14 +- .../k8s.io/api/node/v1beta1/generated.proto | 12 +- vendor/k8s.io/api/node/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- vendor/k8s.io/api/policy/v1/doc.go | 2 +- vendor/k8s.io/api/policy/v1/generated.pb.go | 150 +- vendor/k8s.io/api/policy/v1/generated.proto | 28 + vendor/k8s.io/api/policy/v1/types.go | 48 + .../policy/v1/types_swagger_doc_generated.go | 11 +- .../api/policy/v1/zz_generated.deepcopy.go | 5 + vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 4917 +------ .../k8s.io/api/policy/v1beta1/generated.proto | 305 +- vendor/k8s.io/api/policy/v1beta1/register.go | 2 - vendor/k8s.io/api/policy/v1beta1/types.go | 419 +- .../v1beta1/types_swagger_doc_generated.go | 171 +- .../policy/v1beta1/zz_generated.deepcopy.go | 372 +- .../zz_generated.prerelease-lifecycle.go | 36 - vendor/k8s.io/api/rbac/v1/generated.proto | 2 + vendor/k8s.io/api/rbac/v1/types.go | 2 + .../rbac/v1/types_swagger_doc_generated.go | 6 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/resource/v1alpha2/doc.go | 24 + .../api/resource/v1alpha2/generated.pb.go | 4817 +++++++ .../api/resource/v1alpha2/generated.proto | 400 + .../k8s.io/api/resource/v1alpha2/register.go | 63 + vendor/k8s.io/api/resource/v1alpha2/types.go | 462 + .../v1alpha2/types_swagger_doc_generated.go | 232 + 
.../v1alpha2/zz_generated.deepcopy.go | 498 + .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 4 +- .../v1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 4 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 4 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/storage/v1/generated.proto | 128 +- vendor/k8s.io/api/storage/v1/types.go | 134 +- .../storage/v1/types_swagger_doc_generated.go | 82 +- .../api/storage/v1alpha1/generated.pb.go | 729 +- .../api/storage/v1alpha1/generated.proto | 78 +- .../k8s.io/api/storage/v1alpha1/register.go | 2 + vendor/k8s.io/api/storage/v1alpha1/types.go | 93 +- .../v1alpha1/types_swagger_doc_generated.go | 59 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 66 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/storage/v1beta1/generated.proto | 114 +- vendor/k8s.io/api/storage/v1beta1/types.go | 121 +- .../v1beta1/types_swagger_doc_generated.go | 78 +- .../apimachinery/pkg/api/meta/conditions.go | 37 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 86 +- .../apimachinery/pkg/api/resource/amount.go | 38 + .../apimachinery/pkg/api/resource/quantity.go | 10 + .../pkg/apis/meta/internalversion/defaults.go | 38 + .../pkg/apis/meta/internalversion/types.go | 25 + .../zz_generated.conversion.go | 2 + .../internalversion/zz_generated.deepcopy.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 385 +- .../pkg/apis/meta/v1/generated.proto | 71 +- .../apimachinery/pkg/apis/meta/v1/types.go | 91 +- .../meta/v1/types_swagger_doc_generated.go | 25 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 2 +- .../apis/meta/v1/unstructured/unstructured.go | 5 + .../meta/v1/unstructured/unstructured_list.go | 9 + .../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 5 + .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 2 + .../apimachinery/pkg/labels/selector.go | 131 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 1 - .../apimachinery/pkg/runtime/converter.go | 4 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 23 + .../apimachinery/pkg/runtime/interfaces.go | 5 + .../pkg/runtime/schema/group_version.go | 8 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 3 +- .../pkg/runtime/serializer/codec_factory.go | 3 +- .../runtime/serializer/streaming/streaming.go | 20 - .../serializer/versioning/versioning.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/splice.go | 76 + .../k8s.io/apimachinery/pkg/runtime/types.go | 2 +- .../apimachinery/pkg/types/namespacedname.go | 11 + .../apimachinery/pkg/util/cache/expiring.go | 12 +- .../pkg/util/cache/lruexpirecache.go | 13 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 37 +- .../k8s.io/apimachinery/pkg/util/dump/dump.go | 54 + .../apimachinery/pkg/util/errors/errors.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 2 +- .../apimachinery/pkg/util/intstr/intstr.go | 13 +- .../pkg/util/managedfields/endpoints.yaml | 7018 ++++++++++ .../pkg/util/managedfields/fieldmanager.go | 57 + .../managedfields/internal/atmostevery.go | 60 + .../internal/buildmanagerinfo.go | 74 + .../managedfields/internal/capmanagers.go | 133 + .../util/managedfields/internal/conflict.go | 89 + .../managedfields/internal/fieldmanager.go | 209 + .../pkg/util/managedfields/internal/fields.go | 47 + 
.../managedfields/internal/lastapplied.go | 50 + .../internal/lastappliedmanager.go | 171 + .../internal/lastappliedupdater.go | 102 + .../managedfields/internal/managedfields.go | 248 + .../internal/managedfieldsupdater.go | 82 + .../util/managedfields/internal/manager.go | 52 + .../managedfields/internal/pathelement.go | 140 + .../managedfields/internal/skipnonapplied.go | 89 + .../util/managedfields/internal/stripmeta.go | 90 + .../managedfields/internal/structuredmerge.go | 189 + .../managedfields/internal/typeconverter.go | 193 + .../managedfields/internal/versioncheck.go | 52 + .../internal/versionconverter.go | 123 + .../pkg/util/managedfields/node.yaml | 261 + .../pkg/util/managedfields/pod.yaml | 121 + .../pkg/util/managedfields/scalehandler.go | 174 + .../pkg/util/managedfields/typeconverter.go | 47 + .../apimachinery/pkg/util/mergepatch/util.go | 7 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 6 + .../apimachinery/pkg/util/runtime/runtime.go | 15 +- .../k8s.io/apimachinery/pkg/util/sets/set.go | 14 + .../pkg/util/strategicpatch/meta.go | 89 + .../pkg/util/strategicpatch/patch.go | 65 +- .../pkg/util/validation/field/errors.go | 4 +- .../pkg/util/validation/validation.go | 8 +- .../apimachinery/pkg/util/wait/backoff.go | 502 + .../apimachinery/pkg/util/wait/delay.go | 51 + .../apimachinery/pkg/util/wait/error.go | 96 + .../k8s.io/apimachinery/pkg/util/wait/loop.go | 95 + .../k8s.io/apimachinery/pkg/util/wait/poll.go | 315 + .../apimachinery/pkg/util/wait/timer.go | 121 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 634 +- .../v1/matchcondition.go | 48 + .../v1/mutatingwebhook.go | 14 + .../v1/validatingwebhook.go | 14 + .../v1alpha1/auditannotation.go | 48 + .../v1alpha1/expressionwarning.go | 48 + .../v1alpha1/matchcondition.go | 48 + .../v1alpha1/matchresources.go | 90 + .../v1alpha1/namedrulewithoperations.go | 95 + .../v1alpha1/paramkind.go | 48 + .../v1alpha1/paramref.go | 71 + .../v1alpha1/typechecking.go | 44 + .../v1alpha1/validatingadmissionpolicy.go | 256 + .../validatingadmissionpolicybinding.go | 247 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1alpha1/validatingadmissionpolicyspec.go | 117 + .../validatingadmissionpolicystatus.go | 66 + .../v1alpha1/validation.go | 70 + .../v1alpha1/variable.go | 48 + .../v1beta1/auditannotation.go | 48 + .../v1beta1/expressionwarning.go | 48 + .../v1beta1/matchcondition.go | 48 + .../v1beta1/matchresources.go | 90 + .../v1beta1/mutatingwebhook.go | 29 +- ...erations.go => namedrulewithoperations.go} | 37 +- .../v1beta1/paramkind.go | 48 + .../admissionregistration/v1beta1/paramref.go | 71 + .../admissionregistration/v1beta1/rule.go | 76 - .../v1beta1/typechecking.go | 44 + .../v1beta1/validatingadmissionpolicy.go | 256 + .../validatingadmissionpolicybinding.go | 247 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1beta1/validatingadmissionpolicyspec.go | 117 + .../validatingadmissionpolicystatus.go | 66 + .../v1beta1/validatingwebhook.go | 29 +- .../v1beta1/validation.go | 70 + .../admissionregistration/v1beta1/variable.go | 48 + .../v1alpha1/serverstorageversion.go | 11 + .../apps/v1/statefulsetordinals.go | 39 + .../apps/v1/statefulsetspec.go | 9 + .../v1beta1/statefulsetordinals.go} | 20 +- .../apps/v1beta1/statefulsetspec.go | 9 + .../apps/v1beta2/statefulsetordinals.go | 39 + .../apps/v1beta2/statefulsetspec.go | 9 + .../applyconfigurations/batch/v1/jobspec.go | 27 + .../applyconfigurations/batch/v1/jobstatus.go | 18 + .../v1alpha1/clustertrustbundle.go | 247 + 
.../v1alpha1/clustertrustbundlespec.go | 48 + .../core/v1/claimsource.go | 48 + .../core/v1/clustertrustbundleprojection.go | 79 + .../applyconfigurations/core/v1/container.go | 67 +- .../core/v1/containerresizepolicy.go | 52 + .../core/v1/containerstatus.go | 40 +- .../core/v1/ephemeralcontainer.go | 21 + .../core/v1/ephemeralcontainercommon.go | 67 +- .../applyconfigurations/core/v1/hostip.go | 39 + .../core/v1/lifecyclehandler.go | 9 + .../core/v1/loadbalanceringress.go | 13 + .../core/v1/modifyvolumestatus.go | 52 + .../core/v1/persistentvolumeclaimspec.go | 29 +- .../core/v1/persistentvolumeclaimstatus.go | 44 +- .../core/v1/persistentvolumespec.go | 9 + .../core/v1/persistentvolumestatus.go | 16 +- .../core/v1/podaffinityterm.go | 22 + .../v1/podresourceclaim.go} | 30 +- .../core/v1/podresourceclaimstatus.go | 48 + .../core/v1/podschedulinggate.go | 39 + .../applyconfigurations/core/v1/podspec.go | 28 + .../applyconfigurations/core/v1/podstatus.go | 63 +- .../v1/resourceclaim.go} | 14 +- .../core/v1/resourcerequirements.go | 18 +- .../core/v1/servicespec.go | 8 +- .../core/v1/sleepaction.go | 39 + .../core/v1/typedobjectreference.go | 66 + .../core/v1/volumeprojection.go | 9 + .../core/v1/volumeresourcerequirements.go | 52 + .../extensions/v1beta1/allowedhostpath.go | 48 - .../v1beta1/fsgroupstrategyoptions.go | 57 - .../extensions/v1beta1/hostportrange.go | 48 - .../extensions/v1beta1/idrange.go | 48 - .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../extensions/v1beta1/ingressportstatus.go | 61 + .../extensions/v1beta1/ingressstatus.go | 8 +- .../extensions/v1beta1/networkpolicy.go | 11 +- .../v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../v1/exemptprioritylevelconfiguration.go | 48 + .../flowdistinguishermethod.go | 8 +- .../{v1alpha1 => v1}/flowschema.go | 16 +- .../flowcontrol/v1/flowschemacondition.go | 80 + .../{v1alpha1 => v1}/flowschemaspec.go | 2 +- .../{v1alpha1 => v1}/flowschemastatus.go | 2 +- .../{v1alpha1 => v1}/groupsubject.go | 2 +- .../limitedprioritylevelconfiguration.go | 30 +- .../{v1alpha1 => v1}/limitresponse.go | 8 +- .../{v1alpha1 => v1}/nonresourcepolicyrule.go | 2 +- .../policyruleswithsubjects.go | 2 +- .../prioritylevelconfiguration.go | 16 +- .../v1/prioritylevelconfigurationcondition.go | 80 + .../prioritylevelconfigurationreference.go | 2 +- .../prioritylevelconfigurationspec.go | 17 +- .../prioritylevelconfigurationstatus.go | 2 +- .../{v1alpha1 => v1}/queuingconfiguration.go | 2 +- .../{v1alpha1 => v1}/resourcepolicyrule.go | 2 +- .../{v1alpha1 => v1}/serviceaccountsubject.go | 2 +- .../flowcontrol/{v1alpha1 => v1}/subject.go | 8 +- .../{v1alpha1 => v1}/usersubject.go | 2 +- .../exemptprioritylevelconfiguration.go | 48 + .../limitedprioritylevelconfiguration.go | 18 + .../v1beta1/prioritylevelconfigurationspec.go | 9 + .../exemptprioritylevelconfiguration.go | 48 + .../limitedprioritylevelconfiguration.go | 18 + .../v1beta2/prioritylevelconfigurationspec.go | 9 + .../exemptprioritylevelconfiguration.go | 48 + .../v1beta3/flowdistinguishermethod.go | 43 + .../v1beta3/flowschema.go} | 95 +- .../flowschemacondition.go | 18 +- .../flowcontrol/v1beta3/flowschemaspec.go | 71 + .../v1beta3/flowschemastatus.go} | 20 +- .../v1beta3/groupsubject.go} | 14 +- 
.../limitedprioritylevelconfiguration.go | 66 + .../flowcontrol/v1beta3/limitresponse.go | 52 + .../v1beta3/nonresourcepolicyrule.go | 52 + .../v1beta3/policyruleswithsubjects.go | 72 + .../v1beta3/prioritylevelconfiguration.go | 256 + .../prioritylevelconfigurationcondition.go | 18 +- .../prioritylevelconfigurationreference.go | 39 + .../v1beta3/prioritylevelconfigurationspec.go | 61 + .../prioritylevelconfigurationstatus.go | 44 + .../v1beta3/queuingconfiguration.go | 57 + .../flowcontrol/v1beta3/resourcepolicyrule.go | 83 + .../v1beta3/serviceaccountsubject.go | 48 + .../flowcontrol/v1beta3/subject.go | 70 + .../flowcontrol/v1beta3/usersubject.go | 39 + .../applyconfigurations/internal/internal.go | 2881 ++-- .../applyconfigurations/meta/v1/listmeta.go | 66 - .../meta/v1/unstructured.go | 2 +- .../v1/ingressloadbalanceringress.go | 62 + .../v1/ingressloadbalancerstatus.go | 44 + .../networking/v1/ingressportstatus.go | 61 + .../networking/v1/ingressstatus.go | 8 +- .../networking/v1/networkpolicy.go | 11 +- .../networking/v1alpha1/clustercidrspec.go | 70 - .../v1alpha1/{clustercidr.go => ipaddress.go} | 78 +- .../networking/v1alpha1/ipaddressspec.go | 39 + .../networking/v1alpha1/parentreference.go | 66 + .../v1alpha1/servicecidr.go} | 95 +- .../networking/v1alpha1/servicecidrspec.go | 41 + .../servicecidrstatus.go} | 14 +- .../v1beta1/ingressloadbalanceringress.go | 62 + .../v1beta1/ingressloadbalancerstatus.go | 44 + .../networking/v1beta1/ingressportstatus.go | 61 + .../networking/v1beta1/ingressstatus.go | 8 +- .../policy/v1/poddisruptionbudgetspec.go | 16 +- .../policy/v1beta1/allowedflexvolume.go | 39 - .../policy/v1beta1/allowedhostpath.go | 48 - .../policy/v1beta1/fsgroupstrategyoptions.go | 57 - .../policy/v1beta1/hostportrange.go | 48 - .../policy/v1beta1/idrange.go | 48 - .../policy/v1beta1/poddisruptionbudgetspec.go | 16 +- .../policy/v1beta1/podsecuritypolicyspec.go | 285 - .../v1beta1/runasgroupstrategyoptions.go | 57 - .../v1beta1/runasuserstrategyoptions.go | 57 - .../v1beta1/runtimeclassstrategyoptions.go | 50 - .../policy/v1beta1/selinuxstrategyoptions.go | 53 - .../supplementalgroupsstrategyoptions.go | 57 - .../resource/v1alpha2/allocationresult.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 258 + .../v1alpha2/podschedulingcontextspec.go | 50 + .../v1alpha2/podschedulingcontextstatus.go | 44 + .../resource/v1alpha2/resourceclaim.go | 258 + .../resourceclaimconsumerreference.go | 70 + .../resourceclaimparametersreference.go | 57 + .../v1alpha2/resourceclaimschedulingstatus.go | 50 + .../resource/v1alpha2/resourceclaimspec.go | 61 + .../resource/v1alpha2/resourceclaimstatus.go | 71 + .../v1alpha2/resourceclaimtemplate.go | 249 + .../v1alpha2/resourceclaimtemplatespec.go | 188 + .../resource/v1alpha2/resourceclass.go | 266 + .../resourceclassparametersreference.go | 66 + .../resource/v1alpha2/resourcehandle.go | 48 + .../storage/v1alpha1/volumeattributesclass.go | 262 + .../discovery/aggregated_discovery.go | 156 + .../client-go/discovery/discovery_client.go | 326 +- .../client-go/discovery/fake/discovery.go | 12 +- .../k8s.io/client-go/dynamic/fake/simple.go | 2 + vendor/k8s.io/client-go/dynamic/simple.go | 71 +- .../admissionregistration/interface.go | 8 + .../v1alpha1/interface.go | 52 + .../v1alpha1/validatingadmissionpolicy.go | 89 + .../validatingadmissionpolicybinding.go | 89 + .../v1beta1/interface.go | 14 + .../v1beta1/validatingadmissionpolicy.go | 89 + .../validatingadmissionpolicybinding.go | 89 + .../informers/certificates/interface.go | 8 + 
.../v1alpha1/clustertrustbundle.go | 89 + .../certificates/v1alpha1/interface.go | 45 + vendor/k8s.io/client-go/informers/doc.go | 18 + .../informers/extensions/v1beta1/interface.go | 7 - vendor/k8s.io/client-go/informers/factory.go | 97 +- .../informers/flowcontrol/interface.go | 20 +- .../informers/flowcontrol/v1/flowschema.go | 89 + .../flowcontrol/{v1alpha1 => v1}/interface.go | 2 +- .../v1/prioritylevelconfiguration.go | 89 + .../{v1alpha1 => v1beta3}/flowschema.go | 20 +- .../flowcontrol/v1beta3/interface.go | 52 + .../prioritylevelconfiguration.go | 20 +- vendor/k8s.io/client-go/informers/generic.go | 58 +- .../networking/v1alpha1/interface.go | 17 +- .../v1alpha1/ipaddress.go} | 44 +- .../{clustercidr.go => servicecidr.go} | 38 +- .../informers/policy/v1beta1/interface.go | 7 - .../client-go/informers/resource/interface.go | 46 + .../informers/resource/v1alpha2/interface.go | 66 + .../resource/v1alpha2/podschedulingcontext.go | 90 + .../resource/v1alpha2/resourceclaim.go | 90 + .../v1alpha2/resourceclaimtemplate.go | 90 + .../v1alpha2/resourceclass.go} | 44 +- .../informers/storage/v1alpha1/interface.go | 7 + .../storage/v1alpha1/volumeattributesclass.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 174 +- vendor/k8s.io/client-go/kubernetes/doc.go | 7 +- .../kubernetes/fake/clientset_generated.go | 45 +- .../client-go/kubernetes/fake/register.go | 14 +- .../client-go/kubernetes/scheme/register.go | 14 +- .../fake/fake_mutatingwebhookconfiguration.go | 59 +- .../fake_validatingwebhookconfiguration.go | 59 +- .../v1alpha1/admissionregistration_client.go | 112 + .../v1alpha1/doc.go | 0 .../v1alpha1/fake/doc.go | 0 .../fake/fake_admissionregistration_client.go | 44 + .../fake/fake_validatingadmissionpolicy.go | 178 + .../fake_validatingadmissionpolicybinding.go | 145 + .../v1alpha1/generated_expansion.go | 23 + .../v1alpha1/validatingadmissionpolicy.go | 243 + .../validatingadmissionpolicybinding.go | 197 + .../v1beta1/admissionregistration_client.go | 10 + .../fake/fake_admissionregistration_client.go | 8 + .../fake/fake_mutatingwebhookconfiguration.go | 5 +- .../fake/fake_validatingadmissionpolicy.go | 178 + .../fake_validatingadmissionpolicybinding.go | 145 + .../fake_validatingwebhookconfiguration.go | 5 +- .../v1beta1/generated_expansion.go | 4 + .../v1beta1/validatingadmissionpolicy.go | 243 + .../validatingadmissionpolicybinding.go | 197 + .../v1alpha1/fake/fake_storageversion.go | 5 +- .../apps/v1/fake/fake_controllerrevision.go | 59 +- .../typed/apps/v1/fake/fake_daemonset.go | 71 +- .../typed/apps/v1/fake/fake_deployment.go | 77 +- .../typed/apps/v1/fake/fake_replicaset.go | 77 +- .../typed/apps/v1/fake/fake_statefulset.go | 77 +- .../v1beta1/fake/fake_controllerrevision.go | 5 +- .../apps/v1beta1/fake/fake_deployment.go | 5 +- .../apps/v1beta1/fake/fake_statefulset.go | 5 +- .../v1beta2/fake/fake_controllerrevision.go | 5 +- .../typed/apps/v1beta2/fake/fake_daemonset.go | 5 +- .../apps/v1beta2/fake/fake_deployment.go | 5 +- .../apps/v1beta2/fake/fake_replicaset.go | 5 +- .../apps/v1beta2/fake/fake_statefulset.go | 5 +- .../v1/authentication_client.go | 5 + .../v1/fake/fake_authentication_client.go | 4 + .../v1/fake/fake_selfsubjectreview.go | 46 + .../v1/fake/fake_tokenreview.go | 5 +- .../authentication/v1/generated_expansion.go | 2 + .../authentication/v1/selfsubjectreview.go | 64 + .../v1alpha1/authentication_client.go | 107 + .../typed/authentication}/v1alpha1/doc.go | 2 +- .../authentication/v1alpha1/fake}/doc.go | 6 +- .../fake/fake_authentication_client.go | 
40 + .../v1alpha1/fake/fake_selfsubjectreview.go | 46 + .../v1alpha1/generated_expansion.go | 4 +- .../v1alpha1/selfsubjectreview.go | 64 + .../v1beta1/authentication_client.go | 5 + .../fake/fake_authentication_client.go | 4 + .../v1beta1/fake/fake_selfsubjectreview.go | 46 + .../v1beta1/fake/fake_tokenreview.go | 5 +- .../v1beta1/generated_expansion.go | 2 + .../v1beta1/selfsubjectreview.go | 64 + .../v1/fake/fake_localsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectrulesreview.go | 5 +- .../v1/fake/fake_subjectaccessreview.go | 5 +- .../fake/fake_localsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectrulesreview.go | 5 +- .../v1beta1/fake/fake_subjectaccessreview.go | 5 +- .../v1/fake/fake_horizontalpodautoscaler.go | 71 +- .../v2/fake/fake_horizontalpodautoscaler.go | 5 +- .../fake/fake_horizontalpodautoscaler.go | 5 +- .../fake/fake_horizontalpodautoscaler.go | 5 +- .../typed/batch/v1/fake/fake_cronjob.go | 71 +- .../typed/batch/v1/fake/fake_job.go | 71 +- .../typed/batch/v1beta1/fake/fake_cronjob.go | 5 +- .../v1/fake/fake_certificatesigningrequest.go | 77 +- .../v1alpha1/certificates_client.go} | 40 +- .../v1alpha1/clustertrustbundle.go | 197 + .../typed/certificates/v1alpha1/doc.go | 20 + .../typed/certificates/v1alpha1/fake}/doc.go | 6 +- .../v1alpha1/fake/fake_certificates_client.go | 40 + .../v1alpha1/fake/fake_clustertrustbundle.go | 145 + .../v1alpha1/generated_expansion.go} | 7 +- .../fake/fake_certificatesigningrequest.go | 5 +- .../typed/coordination/v1/fake/fake_lease.go | 59 +- .../coordination/v1beta1/fake/fake_lease.go | 5 +- .../core/v1/fake/fake_componentstatus.go | 59 +- .../typed/core/v1/fake/fake_configmap.go | 59 +- .../typed/core/v1/fake/fake_endpoints.go | 59 +- .../typed/core/v1/fake/fake_event.go | 59 +- .../typed/core/v1/fake/fake_limitrange.go | 59 +- .../typed/core/v1/fake/fake_namespace.go | 67 +- .../typed/core/v1/fake/fake_node.go | 71 +- .../core/v1/fake/fake_persistentvolume.go | 71 +- .../v1/fake/fake_persistentvolumeclaim.go | 71 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 77 +- .../typed/core/v1/fake/fake_pod_expansion.go | 4 +- .../typed/core/v1/fake/fake_podtemplate.go | 59 +- .../v1/fake/fake_replicationcontroller.go | 75 +- .../typed/core/v1/fake/fake_resourcequota.go | 71 +- .../typed/core/v1/fake/fake_secret.go | 59 +- .../typed/core/v1/fake/fake_service.go | 67 +- .../typed/core/v1/fake/fake_serviceaccount.go | 61 +- .../discovery/v1/fake/fake_endpointslice.go | 59 +- .../v1beta1/fake/fake_endpointslice.go | 5 +- .../typed/events/v1/fake/fake_event.go | 59 +- .../typed/events/v1beta1/event_expansion.go | 3 +- .../typed/events/v1beta1/fake/fake_event.go | 5 +- .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/fake/fake_daemonset.go | 5 +- .../v1beta1/fake/fake_deployment.go | 5 +- .../v1beta1/fake/fake_extensions_client.go | 4 - .../extensions/v1beta1/fake/fake_ingress.go | 5 +- .../v1beta1/fake/fake_networkpolicy.go | 40 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 146 - .../v1beta1/fake/fake_replicaset.go | 5 +- .../extensions/v1beta1/generated_expansion.go | 2 - .../typed/extensions/v1beta1/networkpolicy.go | 48 - .../extensions/v1beta1/podsecuritypolicy.go | 197 - .../kubernetes/typed/flowcontrol/v1/doc.go | 20 + .../typed/flowcontrol/v1/fake/doc.go | 20 + .../fake/fake_flowcontrol_client.go | 10 +- .../flowcontrol/v1/fake/fake_flowschema.go | 178 + .../fake/fake_prioritylevelconfiguration.go | 
178 + .../{v1alpha1 => v1}/flowcontrol_client.go | 38 +- .../typed/flowcontrol/v1/flowschema.go | 243 + .../{v1alpha1 => v1}/generated_expansion.go | 2 +- .../v1/prioritylevelconfiguration.go | 243 + .../v1beta1/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../v1beta2/fake/fake_flowschema.go | 5 +- .../fake/fake_prioritylevelconfiguration.go | 5 +- .../typed/flowcontrol/v1beta3/doc.go | 20 + .../typed/flowcontrol/v1beta3/fake/doc.go | 20 + .../v1beta3/fake/fake_flowcontrol_client.go | 44 + .../fake/fake_flowschema.go | 65 +- .../fake/fake_prioritylevelconfiguration.go | 65 +- .../flowcontrol/v1beta3/flowcontrol_client.go | 112 + .../{v1alpha1 => v1beta3}/flowschema.go | 56 +- .../v1beta3/generated_expansion.go | 23 + .../prioritylevelconfiguration.go | 56 +- .../typed/networking/v1/fake/fake_ingress.go | 71 +- .../networking/v1/fake/fake_ingressclass.go | 59 +- .../networking/v1/fake/fake_networkpolicy.go | 94 +- .../typed/networking/v1/networkpolicy.go | 48 - .../typed/networking/v1alpha1/clustercidr.go | 197 - .../v1alpha1/fake/fake_clustercidr.go | 146 - .../v1alpha1/fake/fake_ipaddress.go | 145 + .../v1alpha1/fake/fake_networking_client.go | 8 +- .../v1alpha1/fake/fake_servicecidr.go | 178 + .../v1alpha1/generated_expansion.go | 4 +- .../typed/networking/v1alpha1/ipaddress.go | 197 + .../networking/v1alpha1/networking_client.go | 11 +- .../typed/networking/v1alpha1/servicecidr.go | 243 + .../networking/v1beta1/fake/fake_ingress.go | 5 +- .../v1beta1/fake/fake_ingressclass.go | 5 +- .../typed/node/v1/fake/fake_runtimeclass.go | 59 +- .../node/v1alpha1/fake/fake_runtimeclass.go | 5 +- .../node/v1beta1/fake/fake_runtimeclass.go | 5 +- .../v1/fake/fake_poddisruptionbudget.go | 71 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 5 +- .../v1beta1/fake/fake_podsecuritypolicy.go | 146 - .../policy/v1beta1/fake/fake_policy_client.go | 4 - .../policy/v1beta1/generated_expansion.go | 2 - .../typed/policy/v1beta1/podsecuritypolicy.go | 197 - .../typed/policy/v1beta1/policy_client.go | 5 - .../typed/rbac/v1/fake/fake_clusterrole.go | 59 +- .../rbac/v1/fake/fake_clusterrolebinding.go | 59 +- .../typed/rbac/v1/fake/fake_role.go | 59 +- .../typed/rbac/v1/fake/fake_rolebinding.go | 59 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 5 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 5 +- .../typed/rbac/v1alpha1/fake/fake_role.go | 5 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 5 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 5 +- .../v1beta1/fake/fake_clusterrolebinding.go | 5 +- .../typed/rbac/v1beta1/fake/fake_role.go | 5 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 5 +- .../kubernetes/typed/resource/v1alpha2/doc.go | 20 + .../typed/resource/v1alpha2/fake/doc.go | 20 + .../fake/fake_podschedulingcontext.go | 189 + .../v1alpha2/fake/fake_resource_client.go | 52 + .../v1alpha2/fake/fake_resourceclaim.go | 189 + .../fake/fake_resourceclaimtemplate.go | 154 + .../v1alpha2/fake/fake_resourceclass.go | 145 + .../resource/v1alpha2/generated_expansion.go | 27 + .../resource/v1alpha2/podschedulingcontext.go | 256 + .../resource/v1alpha2/resource_client.go | 122 + .../typed/resource/v1alpha2/resourceclaim.go | 256 + .../v1alpha2/resourceclaimtemplate.go | 208 + .../typed/resource/v1alpha2/resourceclass.go | 197 + .../scheduling/v1/fake/fake_priorityclass.go | 59 +- .../v1alpha1/fake/fake_priorityclass.go | 5 +- .../v1beta1/fake/fake_priorityclass.go | 5 +- .../typed/storage/v1/fake/fake_csidriver.go | 59 +- .../typed/storage/v1/fake/fake_csinode.go | 59 +- 
.../v1/fake/fake_csistoragecapacity.go | 59 +- .../storage/v1/fake/fake_storageclass.go | 59 +- .../storage/v1/fake/fake_volumeattachment.go | 71 +- .../v1alpha1/fake/fake_csistoragecapacity.go | 5 +- .../v1alpha1/fake/fake_storage_client.go | 4 + .../v1alpha1/fake/fake_volumeattachment.go | 5 +- .../fake/fake_volumeattributesclass.go | 145 + .../storage/v1alpha1/generated_expansion.go | 2 + .../typed/storage/v1alpha1/storage_client.go | 5 + .../storage/v1alpha1/volumeattributesclass.go | 197 + .../storage/v1beta1/fake/fake_csidriver.go | 5 +- .../storage/v1beta1/fake/fake_csinode.go | 5 +- .../v1beta1/fake/fake_csistoragecapacity.go | 5 +- .../storage/v1beta1/fake/fake_storageclass.go | 5 +- .../v1beta1/fake/fake_volumeattachment.go | 5 +- .../v1alpha1/expansion_generated.go | 27 + .../v1alpha1/validatingadmissionpolicy.go | 68 + .../validatingadmissionpolicybinding.go | 68 + .../v1beta1/expansion_generated.go | 8 + .../v1beta1/validatingadmissionpolicy.go | 68 + .../validatingadmissionpolicybinding.go | 68 + .../v1alpha1/clustertrustbundle.go | 68 + .../v1alpha1/expansion_generated.go | 23 + .../extensions/v1beta1/expansion_generated.go | 4 - .../extensions/v1beta1/podsecuritypolicy.go | 68 - .../{v1alpha1 => v1}/expansion_generated.go | 2 +- .../{v1alpha1 => v1}/flowschema.go | 18 +- .../prioritylevelconfiguration.go | 18 +- .../v1beta3/expansion_generated.go | 27 + .../listers/flowcontrol/v1beta3/flowschema.go | 68 + .../v1beta3/prioritylevelconfiguration.go | 68 + .../v1alpha1/expansion_generated.go | 10 +- .../listers/networking/v1alpha1/ipaddress.go | 68 + .../{clustercidr.go => servicecidr.go} | 38 +- .../policy/v1beta1/expansion_generated.go | 4 - .../policy/v1beta1/podsecuritypolicy.go | 68 - .../resource/v1alpha2/expansion_generated.go | 47 + .../resource/v1alpha2/podschedulingcontext.go | 99 + .../resource/v1alpha2/resourceclaim.go | 99 + .../v1alpha2/resourceclaimtemplate.go | 99 + .../resource/v1alpha2/resourceclass.go | 68 + .../storage/v1alpha1/expansion_generated.go | 4 + .../storage/v1alpha1/volumeattributesclass.go | 68 + vendor/k8s.io/client-go/openapi/client.go | 7 +- .../k8s.io/client-go/openapi/groupversion.go | 47 +- .../k8s.io/client-go/openapi/typeconverter.go | 48 + .../pkg/apis/clientauthentication/types.go | 5 + .../pkg/apis/clientauthentication/v1/types.go | 5 + .../v1/zz_generated.conversion.go | 2 + .../clientauthentication/v1beta1/types.go | 5 + .../v1beta1/zz_generated.conversion.go | 2 + vendor/k8s.io/client-go/pkg/version/base.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 19 +- .../plugin/pkg/client/auth/gcp/gcp.go | 389 - .../plugin/pkg/client/auth/gcp/gcp_stub.go | 36 + .../plugin/pkg/client/auth/oidc/oidc.go | 4 +- vendor/k8s.io/client-go/rest/client.go | 3 +- vendor/k8s.io/client-go/rest/config.go | 20 +- vendor/k8s.io/client-go/rest/exec.go | 4 +- vendor/k8s.io/client-go/rest/request.go | 124 +- vendor/k8s.io/client-go/rest/transport.go | 5 +- vendor/k8s.io/client-go/rest/url_utils.go | 4 +- vendor/k8s.io/client-go/rest/with_retry.go | 38 +- .../k8s.io/client-go/restmapper/shortcut.go | 34 +- vendor/k8s.io/client-go/testing/fixture.go | 2 +- .../forked/golang/template/exec.go | 52 - .../forked/golang/template/funcs.go | 177 - .../k8s.io/client-go/tools/auth/clientauth.go | 5 +- .../client-go/tools/cache/controller.go | 88 +- .../client-go/tools/cache/delta_fifo.go | 142 +- .../client-go/tools/cache/expiration_cache.go | 2 - vendor/k8s.io/client-go/tools/cache/fifo.go | 14 +- .../k8s.io/client-go/tools/cache/listers.go | 20 +- 
.../client-go/tools/cache/object-names.go | 65 + .../k8s.io/client-go/tools/cache/reflector.go | 501 +- .../reflector_data_consistency_detector.go | 119 + .../client-go/tools/cache/shared_informer.go | 327 +- vendor/k8s.io/client-go/tools/cache/store.go | 31 +- .../client-go/tools/cache/synctrack/lazy.go | 83 + .../tools/cache/synctrack/synctrack.go | 120 + .../tools/cache/thread_safe_store.go | 303 +- .../client-go/tools/clientcmd/api/helpers.go | 91 +- .../client-go/tools/clientcmd/api/types.go | 19 +- .../client-go/tools/clientcmd/api/v1/types.go | 5 + .../api/v1/zz_generated.conversion.go | 2 + .../client-go/tools/clientcmd/auth_loaders.go | 3 +- .../tools/clientcmd/client_config.go | 7 +- .../client-go/tools/clientcmd/loader.go | 33 +- .../tools/clientcmd/merged_client_builder.go | 4 +- .../client-go/tools/clientcmd/overrides.go | 42 +- .../tools/internal/events/interfaces.go | 59 + .../tools/leaderelection/leaderelection.go | 20 +- .../resourcelock/configmaplock.go | 126 - .../resourcelock/endpointslock.go | 121 - .../leaderelection/resourcelock/interface.go | 42 +- .../leaderelection/resourcelock/leaselock.go | 12 +- .../k8s.io/client-go/tools/metrics/metrics.go | 65 + vendor/k8s.io/client-go/tools/pager/pager.go | 41 +- vendor/k8s.io/client-go/tools/record/event.go | 214 +- vendor/k8s.io/client-go/tools/record/fake.go | 30 +- .../k8s.io/client-go/tools/reference/ref.go | 2 +- vendor/k8s.io/client-go/transport/cache.go | 22 +- .../k8s.io/client-go/transport/cache_go118.go | 24 + vendor/k8s.io/client-go/transport/config.go | 14 +- .../client-go/transport/token_source.go | 4 +- .../k8s.io/client-go/transport/transport.go | 83 +- vendor/k8s.io/client-go/util/cert/cert.go | 44 +- vendor/k8s.io/client-go/util/cert/io.go | 7 +- vendor/k8s.io/client-go/util/jsonpath/doc.go | 20 - .../client-go/util/jsonpath/jsonpath.go | 579 - vendor/k8s.io/client-go/util/jsonpath/node.go | 256 - .../k8s.io/client-go/util/jsonpath/parser.go | 527 - vendor/k8s.io/client-go/util/keyutil/key.go | 9 +- .../util/workqueue/delaying_queue.go | 61 +- .../client-go/util/workqueue/metrics.go | 9 +- .../k8s.io/client-go/util/workqueue/queue.go | 108 +- .../util/workqueue/rate_limiting_queue.go | 61 +- vendor/k8s.io/code-generator/README.md | 2 +- .../cmd/applyconfiguration-gen/args/args.go | 81 + .../args/externaltypes.go | 122 + .../generators/applyconfiguration.go | 423 + .../generators/internal.go | 99 + .../generators/jsontagutil.go | 99 + .../generators/openapi.go | 198 + .../generators/packages.go | 297 + .../generators/refgraph.go | 179 + .../generators/types.go | 33 + .../applyconfiguration-gen/generators/util.go | 163 + .../cmd/applyconfiguration-gen/main.go | 54 + .../client-gen/generators/client_generator.go | 26 +- .../fake/generator_fake_for_type.go | 73 +- .../generators/generator_for_clientset.go | 3 +- .../code-generator/cmd/client-gen/main.go | 1 - .../code-generator/cmd/conversion-gen/main.go | 5 - .../code-generator/cmd/deepcopy-gen/main.go | 5 - .../code-generator/cmd/defaulter-gen/main.go | 5 - .../cmd/go-to-protobuf/protobuf/cmd.go | 33 +- .../cmd/go-to-protobuf/protobuf/parser.go | 6 +- .../code-generator/cmd/import-boss/main.go | 3 - .../cmd/informer-gen/generators/factory.go | 108 +- .../cmd/informer-gen/generators/types.go | 1 + .../code-generator/cmd/informer-gen/main.go | 1 - .../code-generator/cmd/lister-gen/main.go | 1 - .../cmd/register-gen/generators/packages.go | 2 +- .../code-generator/cmd/register-gen/main.go | 2 - .../k8s.io/code-generator/cmd/set-gen/main.go | 2 - 
.../k8s.io/code-generator/generate-groups.sh | 69 +- .../generate-internal-groups.sh | 219 +- vendor/k8s.io/code-generator/kube_codegen.sh | 651 + .../k8s.io/code-generator/pkg/util/build.go | 39 - vendor/k8s.io/code-generator/tools.go | 1 + .../kube-openapi/pkg/builder3/util/util.go | 51 - .../k8s.io/kube-openapi/pkg/cached/cache.go | 290 + .../k8s.io/kube-openapi/pkg/common/common.go | 89 +- .../kube-openapi/pkg/generators/api_linter.go | 3 +- .../kube-openapi/pkg/generators/enum.go | 20 +- .../kube-openapi/pkg/generators/markers.go | 575 + .../kube-openapi/pkg/generators/openapi.go | 301 +- .../pkg/generators/rules/idl_tag.go | 3 +- .../pkg/generators/rules/names_match.go | 54 +- .../kube-openapi/pkg/handler3/handler.go | 246 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 25 + .../pkg/internal/handler/handler_cache.go | 57 - .../pkg/internal/serialization.go | 65 + .../go-json-experiment/json/AUTHORS | 3 + .../go-json-experiment/json/CONTRIBUTORS | 3 + .../go-json-experiment/json/LICENSE | 27 + .../go-json-experiment/json/README.md | 321 + .../go-json-experiment/json/arshal.go | 513 + .../go-json-experiment/json/arshal_any.go | 238 + .../go-json-experiment/json/arshal_default.go | 1485 +++ .../go-json-experiment/json/arshal_funcs.go | 387 + .../go-json-experiment/json/arshal_inlined.go | 213 + .../go-json-experiment/json/arshal_methods.go | 229 + .../go-json-experiment/json/arshal_time.go | 241 + .../go-json-experiment/json/decode.go | 1655 +++ .../go-json-experiment/json/doc.go | 182 + .../go-json-experiment/json/encode.go | 1170 ++ .../go-json-experiment/json/errors.go | 183 + .../go-json-experiment/json/fields.go | 509 + .../go-json-experiment/json/fold.go | 56 + .../go-json-experiment/json/intern.go | 86 + .../go-json-experiment/json/pools.go | 182 + .../go-json-experiment/json/state.go | 747 ++ .../go-json-experiment/json/token.go | 522 + .../go-json-experiment/json/value.go | 381 + .../kube-openapi/pkg/openapiconv/convert.go | 322 - .../kube-openapi/pkg/schemaconv/openapi.go | 260 + .../pkg/schemaconv/proto_models.go | 178 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 309 +- .../kube-openapi/pkg/schemamutation/walker.go | 519 - .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 43 +- .../k8s.io/kube-openapi/pkg/spec3/example.go | 39 +- .../pkg/spec3/external_documentation.go | 34 +- vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 281 + .../k8s.io/kube-openapi/pkg/spec3/header.go | 52 + .../kube-openapi/pkg/spec3/media_type.go | 42 +- .../kube-openapi/pkg/spec3/operation.go | 49 +- .../kube-openapi/pkg/spec3/parameter.go | 53 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 129 +- .../kube-openapi/pkg/spec3/request_body.go | 44 +- .../k8s.io/kube-openapi/pkg/spec3/response.go | 173 +- .../pkg/spec3/security_requirement.go | 56 - .../kube-openapi/pkg/spec3/security_scheme.go | 19 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 67 +- vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 38 + .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/util/proto/document_v3.go | 6 +- .../pkg/validation/spec/gnostic.go | 8 +- .../pkg/validation/spec/header.go | 43 + .../kube-openapi/pkg/validation/spec/info.go | 45 + .../kube-openapi/pkg/validation/spec/items.go | 71 + .../pkg/validation/spec/operation.go | 50 + .../pkg/validation/spec/parameter.go | 93 +- .../pkg/validation/spec/path_item.go | 39 + .../kube-openapi/pkg/validation/spec/paths.go | 79 + .../kube-openapi/pkg/validation/spec/ref.go | 18 +- .../pkg/validation/spec/response.go | 55 +- .../pkg/validation/spec/responses.go | 106 
+- .../pkg/validation/spec/schema.go | 118 + .../pkg/validation/spec/security_scheme.go | 28 + .../pkg/validation/spec/swagger.go | 171 +- .../kube-openapi/pkg/validation/spec/tag.go | 32 + vendor/k8s.io/utils/integer/integer.go | 8 +- vendor/k8s.io/utils/lru/lru.go | 10 +- vendor/k8s.io/utils/net/ipfamily.go | 181 + vendor/k8s.io/utils/net/net.go | 126 +- vendor/k8s.io/utils/net/port.go | 18 +- vendor/k8s.io/utils/pointer/pointer.go | 291 +- vendor/k8s.io/utils/ptr/README.md | 3 + vendor/k8s.io/utils/ptr/ptr.go | 73 + vendor/k8s.io/utils/trace/trace.go | 49 +- .../eventing/v1alpha1/kafka_sink_lifecycle.go | 9 +- .../v1beta1/kafka_channel_lifecycle.go | 6 +- .../v1beta1/kafka_channel_validation.go | 44 + .../pkg/config/eventingkafkaconfig.go | 2 +- .../control-plane/pkg/contract/contract.pb.go | 237 +- .../control-plane/pkg/core/config/utils.go | 11 - .../control-plane/pkg/kafka/client.go | 4 +- .../control-plane/pkg/kafka/consumer_group.go | 2 +- .../pkg/kafka/consumer_group_lag.go | 2 +- .../control-plane/pkg/kafka/offset/offsets.go | 2 +- .../pkg/kafka/sarama/batch_get_offsets.go | 2 +- .../control-plane/pkg/kafka/topic.go | 13 +- .../reconciler/base/receiver_condition_set.go | 28 +- .../pkg/reconciler/broker/broker.go | 15 +- .../pkg/reconciler/broker/controller.go | 2 +- .../broker/namespaced_controller.go | 2 +- .../control-plane/pkg/security/scram.go | 2 +- .../control-plane/pkg/security/secret.go | 4 +- .../test/lib/resources/kafkachannel.go | 8 +- .../eventing-kafka-broker/test/pkg/run.go | 9 +- .../test/rekt/features/broker_auth.go | 10 + .../test/rekt/features/ce_extensions.go | 168 + .../test/rekt/features/kafka_source.go | 18 +- .../kafkaauthsecret/kafkaauthsecret.go | 20 + .../resources/kafkachannel/kafkachannel.go | 2 +- .../rekt/resources/kafkasink/kafkasink.go | 2 +- .../rekt/resources/kafkasource/kafkasource.go | 4 + .../test/upgrade/installation/shell.go | 12 +- .../pkg/apis/duck/v1/delivery_types.go | 13 +- .../pkg/apis/duck/v1/subscribable_types.go | 14 + .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 29 +- .../eventing/pkg/apis/eventing/register.go | 4 + .../pkg/apis/eventing/v1/broker_lifecycle.go | 10 +- .../pkg/apis/eventing/v1/test_helper.go | 9 +- .../pkg/apis/eventing/v1/trigger_lifecycle.go | 29 +- .../pkg/apis/eventing/v1/trigger_types.go | 8 + .../apis/eventing/v1/zz_generated.deepcopy.go | 10 + .../apis/eventing/v1beta1/eventtype_types.go | 1 + .../eventing/pkg/apis/eventing/v1beta3/doc.go | 20 + .../eventing/v1beta3/eventtype_conversion.go} | 24 +- .../eventing/v1beta3/eventtype_defaults.go | 34 + .../eventing/v1beta3/eventtype_lifecycle.go | 65 + .../apis/eventing/v1beta3/eventtype_types.go | 123 + .../eventing/v1beta3/eventtype_validation.go | 62 + .../pkg/apis/eventing/v1beta3}/register.go | 18 +- .../v1beta3}/zz_generated.deepcopy.go | 97 +- .../eventing/pkg/apis/feature/features.go | 7 +- .../eventing/pkg/apis/feature/flag_names.go | 1 + .../pkg/apis/flows/v1/parallel_lifecycle.go | 21 +- .../pkg/apis/flows/v1/parallel_types.go | 4 + .../pkg/apis/flows/v1/sequence_lifecycle.go | 32 +- .../pkg/apis/flows/v1/sequence_types.go | 4 + .../apis/flows/v1/zz_generated.deepcopy.go | 10 + .../v1/in_memory_channel_validation.go | 50 + .../messaging/v1/subscription_lifecycle.go | 20 +- .../apis/messaging/v1/subscription_types.go | 14 + .../messaging/v1/zz_generated.deepcopy.go | 15 + .../eventing/pkg/apis/sources/register.go | 6 + .../apis/sources/v1/apiserver_lifecycle.go | 21 + .../apis/sources/v1/container_lifecycle.go | 19 + 
.../pkg/apis/sources/v1/ping_lifecycle.go | 23 +- .../pkg/apis/sources/v1/ping_validation.go | 6 +- .../apis/sources/v1/sinkbinding_lifecycle.go | 79 + .../pkg/apis/sources/v1/sinkbinding_types.go | 12 + .../apis/sources/v1/zz_generated.deepcopy.go | 5 + .../apis/sources/v1beta2/ping_validation.go | 6 +- .../knative.dev/eventing/pkg/auth/audience.go | 32 + .../eventing/pkg/auth/serviceaccount.go | 149 + .../eventing/pkg/auth/token_provider.go | 96 + .../eventing/pkg/auth/token_verifier.go | 181 + vendor/knative.dev/eventing/pkg/auth/utils.go | 64 + .../client/clientset/versioned/clientset.go | 13 + .../versioned/fake/clientset_generated.go | 7 + .../clientset/versioned/fake/register.go | 2 + .../clientset/versioned/scheme/register.go | 2 + .../typed/eventing/v1/fake/fake_broker.go | 57 +- .../typed/eventing/v1/fake/fake_trigger.go | 57 +- .../eventing/v1beta1/fake/fake_eventtype.go | 5 +- .../eventing/v1beta2/fake/fake_eventtype.go | 5 +- .../versioned/typed/eventing/v1beta3/doc.go | 20 + .../typed/eventing/v1beta3/eventing_client.go | 107 + .../typed/eventing/v1beta3/eventtype.go | 195 + .../{ => typed/eventing/v1beta3/fake}/doc.go | 4 +- .../v1beta3/fake/fake_eventing_client.go | 40 + .../eventing/v1beta3/fake/fake_eventtype.go | 141 + .../eventing/v1beta3/generated_expansion.go | 21 + .../typed/flows/v1/fake/fake_parallel.go | 57 +- .../typed/flows/v1/fake/fake_sequence.go | 57 +- .../typed/messaging/v1/fake/fake_channel.go | 57 +- .../messaging/v1/fake/fake_inmemorychannel.go | 57 +- .../messaging/v1/fake/fake_subscription.go | 57 +- .../sources/v1/fake/fake_apiserversource.go | 57 +- .../sources/v1/fake/fake_containersource.go | 57 +- .../typed/sources/v1/fake/fake_pingsource.go | 57 +- .../typed/sources/v1/fake/fake_sinkbinding.go | 57 +- .../sources/v1beta2/fake/fake_pingsource.go | 5 +- .../externalversions/eventing/interface.go | 8 + .../eventing/v1beta3/eventtype.go | 90 + .../eventing/v1beta3/interface.go | 45 + .../informers/externalversions/factory.go | 4 +- .../informers/externalversions/generic.go | 5 + .../eventing/v1/broker/reconciler.go | 10 +- .../listers/eventing/v1beta3/eventtype.go | 99 + .../eventing/v1beta3/expansion_generated.go | 27 + .../pkg/kncloudevents/event_dispatcher.go | 42 +- .../eventing/pkg/metrics/metrics.go | 3 + .../reconciler/testing/v1/apiserversouce.go | 35 + .../pkg/reconciler/testing/v1/broker.go | 21 +- .../reconciler/testing/v1/containersource.go | 31 + .../pkg/reconciler/testing/v1/listers.go | 4 + .../pkg/reconciler/testing/v1/parallel.go | 30 + .../pkg/reconciler/testing/v1/pingsource.go | 31 + .../pkg/reconciler/testing/v1/sequence.go | 30 + .../pkg/reconciler/testing/v1/subscription.go | 32 + .../pkg/reconciler/testing/v1/trigger.go | 36 + .../knative.dev/eventing/test/e2e-common.sh | 75 +- .../eventing/test/e2e-conformance-tests.sh | 2 +- .../eventing/test/e2e-rekt-tests.sh | 8 +- vendor/knative.dev/eventing/test/e2e-tests.sh | 2 +- .../eventing/test/e2e-upgrade-tests.sh | 2 +- .../eventing/test/e2e/helpers/README.md | 2 +- vendor/knative.dev/eventing/test/e2e_flags.go | 2 +- .../test/lib/recordevents/resources.go | 4 - .../eventing/test/lib/resources/eventing.go | 11 + .../test/rekt/features/broker/data_plane.go | 12 +- .../test/rekt/features/broker/feature.go | 10 +- .../test/rekt/features/broker/oidc_feature.go | 223 + .../test/rekt/features/broker/topology.go | 2 +- .../rekt/features/channel/control_plane.go | 113 +- .../test/rekt/features/channel/data_plane.go | 4 +- .../test/rekt/features/channel/features.go | 14 +- 
.../rekt/features/channel/oidc_feature.go | 66 + .../test/rekt/features/channel/topology.go | 2 +- .../features/featureflags/featureflags.go | 14 + .../test/rekt/features/knconf/data_plane.go | 2 +- .../test/rekt/features/pingsource/features.go | 56 + .../rekt/features/pingsource/oidc_feature.go | 60 + .../rekt/resources/addressable/addressable.go | 27 +- .../apiserversource/apiserversource.go | 5 + .../apiserversource/apiserversource.yaml | 3 + .../test/rekt/resources/broker/broker.go | 45 +- .../test/rekt/resources/broker/broker.yaml | 7 + .../resources/channel_impl/channel_impl.go | 14 +- .../containersource/containersource.go | 3 + .../containersource/containersource.yaml | 3 + .../test/rekt/resources/delivery/delivery.go | 47 + .../rekt/resources/delivery/delivery.yaml | 7 + .../rekt/resources/eventtype/eventtype.go | 25 +- .../test/rekt/resources/parallel/parallel.go | 115 +- .../rekt/resources/parallel/parallel.yaml | 28 + .../rekt/resources/pingsource/pingsource.go | 5 +- .../rekt/resources/pingsource/pingsource.yaml | 3 + .../test/rekt/resources/sequence/sequence.go | 84 + .../rekt/resources/sequence/sequence.yaml | 14 + .../resources/subscription/subscription.go | 9 +- .../resources/subscription/subscription.yaml | 3 + .../test/rekt/resources/trigger/trigger.go | 35 +- .../test/rekt/resources/trigger/trigger.yaml | 5 +- .../test/upgrade/prober/configuration.go | 5 +- .../eventing/test/upgrade/prober/verify.go | 5 +- .../upgrade/prober/wathola/config/defaults.go | 3 +- .../prober/wathola/config/structure.go | 1 - .../upgrade/prober/wathola/event/services.go | 26 +- .../prober/wathola/receiver/services.go | 1 - vendor/knative.dev/hack/README.md | 31 +- vendor/knative.dev/hack/codegen-library.sh | 5 +- vendor/knative.dev/hack/e2e-tests.sh | 9 +- vendor/knative.dev/hack/{hack.go => embed.go} | 15 +- vendor/knative.dev/hack/go.work | 1 + vendor/knative.dev/hack/go.work.sum | 12 - vendor/knative.dev/hack/infra-library.sh | 41 +- vendor/knative.dev/hack/library.sh | 70 +- vendor/knative.dev/hack/performance-tests.sh | 16 +- vendor/knative.dev/hack/presubmit-tests.sh | 2 +- vendor/knative.dev/hack/release.sh | 4 +- .../apis/networking/metadata_validation.go | 3 +- .../networking/pkg/apis/networking/ports.go | 13 + .../pkg/apis/networking/register.go | 32 +- .../networking/v1alpha1/ingress_helpers.go | 55 + .../v1alpha1/fake/fake_certificate.go | 5 +- .../v1alpha1/fake/fake_clusterdomainclaim.go | 5 +- .../networking/v1alpha1/fake/fake_domain.go | 5 +- .../networking/v1alpha1/fake/fake_ingress.go | 5 +- .../networking/v1alpha1/fake/fake_realm.go | 5 +- .../v1alpha1/fake/fake_serverlessservice.go | 5 +- .../informers/externalversions/factory.go | 4 +- .../networking/v1alpha1/ingress/reconciler.go | 10 +- .../networking/pkg/config/config.go | 161 +- .../operator/base/ingressconfiguration.go | 9 + .../operator/pkg/apis/operator/register.go | 2 +- .../informers/externalversions/factory.go | 4 +- .../v1beta1/knativeeventing/reconciler.go | 10 +- .../v1beta1/knativeserving/reconciler.go | 10 +- .../common/poddisruptionbudget_override.go | 26 +- .../operator/pkg/reconciler/common/stages.go | 14 +- .../reconciler/common/workload_override.go | 13 +- .../reconciler/knativeeventing/controller.go | 19 - .../knativeeventing/eventing_tls.go | 2 +- .../knativeeventing/knativeeventing.go | 13 +- .../knativeserving/ingress/istio.go | 1 - .../knativeserving/ingress/kourier.go | 65 +- .../knativeserving/knativeserving.go | 7 + .../knativeserving/security/securityguard.go | 8 +- 
.../pkg/apis/duck/v1/addressable_types.go | 4 + .../pkg/apis/duck/v1/auth_types.go | 25 + .../pkg/apis/duck/v1/destination.go | 8 + .../pkg/apis/duck/v1/source_types.go | 9 + .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 41 + .../generators/reconciler_reconciler.go | 6 +- .../pkg/configmap/informer/synced_callback.go | 4 +- vendor/knative.dev/pkg/configmap/parse.go | 6 +- .../pkg/environment/client_config.go | 6 +- vendor/knative.dev/pkg/hack/update-codegen.sh | 2 +- vendor/knative.dev/pkg/hash/bucketer.go | 10 +- vendor/knative.dev/pkg/hash/hash.go | 12 +- .../knative.dev/pkg/leaderelection/context.go | 4 +- .../pkg/logging/object_encoders.go | 2 +- vendor/knative.dev/pkg/metrics/metrics.go | 6 +- vendor/knative.dev/pkg/network/h2c.go | 8 +- vendor/knative.dev/pkg/network/transports.go | 15 +- vendor/knative.dev/pkg/profiling/server.go | 7 +- .../pkg/reconciler/testing/context.go | 11 +- .../pkg/reconciler/testing/hooks.go | 14 +- .../pkg/reconciler/testing/table.go | 4 +- .../pkg/resolver/addressable_resolver.go | 26 +- .../pkg/test/spoof/response_checks.go | 4 +- .../test/upgrade}/shell/executor.go | 65 +- .../test/upgrade}/shell/fail-example.sh | 2 +- .../test/upgrade}/shell/prefixer.go | 2 +- .../test/upgrade}/shell/project.go | 6 +- .../{hack => pkg/test/upgrade}/shell/types.go | 21 +- vendor/knative.dev/pkg/test/zipkin/util.go | 2 +- vendor/knative.dev/pkg/version/version.go | 4 +- vendor/knative.dev/pkg/webhook/helper.go | 14 + .../conversion/reconciler.go | 4 - vendor/knative.dev/pkg/webhook/webhook.go | 16 +- .../pkg/eventshub/103-pod.yaml | 19 +- .../eventshub/105-certificate-service.yaml | 2 +- .../pkg/eventshub/assert/step.go | 6 +- .../pkg/eventshub/event_info_store.go | 24 +- .../reconciler-test/pkg/eventshub/options.go | 55 +- .../pkg/eventshub/rbac/101-rbac.yaml | 17 + .../pkg/eventshub/resources.go | 100 + .../reconciler-test/pkg/eventshub/utils.go | 8 + .../reconciler-test/pkg/images/ko/publish.go | 22 +- .../serviceaccount/serviceaccount.go | 46 + .../serviceaccount/serviceaccount.yaml | 17 + vendor/knative.dev/serving/AUTHORS | 11 - .../serving/pkg/apis/config/defaults.go | 30 +- .../serving/pkg/apis/config/features.go | 6 + .../serving/pkg/apis/serving/fieldmask.go | 12 +- .../serving/pkg/apis/serving/k8s_lifecycle.go | 51 +- .../pkg/apis/serving/k8s_validation.go | 90 +- .../pkg/apis/serving/v1/revision_defaults.go | 37 +- .../pkg/apis/serving/v1/revision_helpers.go | 19 + .../pkg/apis/serving/v1/revision_lifecycle.go | 25 +- .../pkg/apis/serving/v1/route_lifecycle.go | 13 +- .../pkg/apis/serving/v1alpha1/README.md | 9 - .../apis/serving/v1alpha1/conversion_error.go | 51 - .../serving/pkg/apis/serving/v1alpha1/doc.go | 24 - .../v1alpha1/domainmapping_lifecycle.go | 180 - .../serving/v1alpha1/domainmapping_types.go | 135 - .../v1alpha1/domainmapping_validation.go | 73 - .../v1beta1/domainmapping_lifecycle.go | 6 +- .../client/clientset/versioned/clientset.go | 13 - .../clientset/versioned/scheme/register.go | 2 - .../typed/serving/v1alpha1/domainmapping.go | 195 - .../serving/pkg/testing/v1/revision.go | 6 + .../serving/pkg/testing/v1/route.go | 8 +- vendor/knative.dev/serving/test/README.md | 2 +- .../knative.dev/serving/test/adding_tests.md | 2 +- vendor/knative.dev/serving/test/clients.go | 37 +- .../knative.dev/serving/test/conformance.go | 2 - vendor/knative.dev/serving/test/e2e-common.sh | 64 +- ...ts.sh => e2e-external-domain-tls-tests.sh} | 114 +- .../test/e2e-internal-encryption-tests.sh | 19 - .../serving/test/e2e-networking-library.sh | 12 +- 
vendor/knative.dev/serving/test/e2e-tests.sh | 105 +- .../serving/test/e2e-upgrade-tests.sh | 2 +- .../knative.dev/serving/test/e2e/autoscale.go | 75 +- vendor/knative.dev/serving/test/e2e/e2e.go | 10 +- .../knative.dev/serving/test/e2e/websocket.go | 2 +- .../test/upgrade/deployment_failure.go | 134 + .../serving/test/upgrade/postupgrade.go | 1 + .../serving/test/upgrade/preupgrade.go | 1 + .../serving/test/v1/configuration.go | 2 +- .../knative.dev/serving/test/v1/revision.go | 2 +- vendor/knative.dev/serving/test/v1/route.go | 2 +- vendor/knative.dev/serving/test/v1/service.go | 2 +- vendor/modules.txt | 253 +- .../controller-runtime/pkg/cache/cache.go | 512 +- .../pkg/cache/delegating_by_gvk_cache.go | 135 + .../pkg/cache/informer_cache.go | 157 +- .../pkg/cache/internal/cache_reader.go | 91 +- .../pkg/cache/internal/deleg_map.go | 126 - .../pkg/cache/internal/disabledeepcopy.go | 35 - .../pkg/cache/internal/informers.go | 587 + .../pkg/cache/internal/informers_map.go | 480 - .../pkg/cache/internal/selector.go | 15 - .../pkg/cache/internal/transformers.go | 50 - .../pkg/cache/multi_namespace_cache.go | 261 +- .../pkg/certwatcher/certwatcher.go | 54 +- .../pkg/client/apiutil/apimachinery.go | 105 +- .../pkg/client/apiutil/dynamicrestmapper.go | 294 - .../pkg/client/apiutil/errors.go | 54 + .../pkg/client/apiutil/restmapper.go | 335 + .../controller-runtime/pkg/client/client.go | 415 +- ...ient_cache.go => client_rest_resources.go} | 27 +- .../pkg/client/config/config.go | 44 +- .../controller-runtime/pkg/client/doc.go | 14 +- .../controller-runtime/pkg/client/dryrun.go | 50 +- .../pkg/client/fake/client.go | 672 +- .../controller-runtime/pkg/client/fake/doc.go | 15 +- .../pkg/client/interceptor/intercept.go | 166 + .../pkg/client/interfaces.go | 92 +- .../pkg/client/metadata_client.go | 23 +- .../pkg/client/namespaced_client.go | 99 +- .../controller-runtime/pkg/client/options.go | 157 +- .../controller-runtime/pkg/client/patch.go | 2 +- .../controller-runtime/pkg/client/split.go | 141 - .../pkg/client/typed_client.go | 130 +- .../pkg/client/unstructured_client.go | 186 +- .../controller-runtime/pkg/client/watch.go | 30 +- .../controller-runtime/pkg/cluster/cluster.go | 180 +- .../pkg/cluster/internal.go | 41 +- .../controller-runtime/pkg/config/config.go | 16 +- .../pkg/config/controller.go | 49 + .../controller-runtime/pkg/config/doc.go | 10 +- .../pkg/config/v1alpha1/doc.go | 2 + .../pkg/config/v1alpha1/register.go | 6 + .../pkg/config/v1alpha1/types.go | 24 +- .../config/v1alpha1/zz_generated.deepcopy.go | 6 +- .../pkg/controller/controller.go | 49 +- .../controllerutil/controllerutil.go | 118 +- .../controller-runtime/pkg/controller/doc.go | 2 +- .../controller-runtime/pkg/handler/doc.go | 2 +- .../controller-runtime/pkg/handler/enqueue.go | 10 +- .../pkg/handler/enqueue_mapped.go | 37 +- .../pkg/handler/enqueue_owner.go | 86 +- .../pkg/handler/eventhandler.go | 36 +- .../controller-runtime/pkg/healthz/healthz.go | 12 +- .../pkg/internal/controller/controller.go | 78 +- .../internal/controller/metrics/metrics.go | 8 + .../pkg/internal/field/selector/utils.go | 37 + .../pkg/internal/objectutil/objectutil.go | 36 - .../pkg/internal/recorder/recorder.go | 11 +- .../source/event_handler.go} | 58 +- .../pkg/internal/source/kind.go | 117 + .../pkg/internal/syncs/syncs.go | 38 + .../controller-runtime/pkg/log/deleg.go | 58 +- .../controller-runtime/pkg/log/log.go | 59 +- .../pkg/log/zap/kube_helpers.go | 22 - .../controller-runtime/pkg/log/zap/zap.go | 46 +- .../pkg/manager/internal.go | 
231 +- .../controller-runtime/pkg/manager/manager.go | 359 +- .../pkg/manager/runnable_group.go | 6 +- .../controller-runtime/pkg/manager/server.go | 61 + .../pkg/metrics/client_go_adapter.go | 174 +- .../pkg/metrics/leaderelection.go | 40 + .../pkg/metrics/listener.go | 52 - .../pkg/metrics/server/doc.go | 26 + .../pkg/metrics/server/server.go | 312 + .../pkg/metrics/workqueue.go | 8 +- .../pkg/predicate/predicate.go | 56 +- .../pkg/reconcile/reconcile.go | 88 +- .../pkg/runtime/inject/doc.go | 22 - .../pkg/runtime/inject/inject.go | 164 - .../controller-runtime/pkg/scheme/scheme.go | 45 +- .../controller-runtime/pkg/source/source.go | 178 +- .../pkg/webhook/admission/decode.go | 17 +- .../pkg/webhook/admission/defaulter.go | 30 +- .../pkg/webhook/admission/defaulter_custom.go | 26 +- .../pkg/webhook/admission/doc.go | 6 - .../pkg/webhook/admission/http.go | 72 +- .../pkg/webhook/admission/inject.go | 31 - .../pkg/webhook/admission/multi.go | 52 - .../pkg/webhook/admission/response.go | 23 +- .../pkg/webhook/admission/validator.go | 107 +- .../pkg/webhook/admission/validator_custom.go | 52 +- .../pkg/webhook/admission/webhook.go | 140 +- .../controller-runtime/pkg/webhook/alias.go | 2 + .../controller-runtime/pkg/webhook/server.go | 255 +- .../v4/merge/conflict.go | 121 + .../structured-merge-diff/v4/merge/update.go | 358 + 2180 files changed, 198657 insertions(+), 66549 deletions(-) rename vendor/github.com/{Shopify => IBM}/sarama/.gitignore (100%) rename vendor/github.com/{Shopify => IBM}/sarama/.golangci.yml (78%) create mode 100644 vendor/github.com/IBM/sarama/.pre-commit-config.yaml create mode 100644 vendor/github.com/IBM/sarama/CHANGELOG.md create mode 100644 vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/IBM/sarama/CONTRIBUTING.md create mode 100644 vendor/github.com/IBM/sarama/Dockerfile.kafka rename vendor/github.com/{Shopify/sarama/LICENSE => IBM/sarama/LICENSE.md} (95%) rename vendor/github.com/{Shopify => IBM}/sarama/Makefile (100%) rename vendor/github.com/{Shopify => IBM}/sarama/README.md (63%) create mode 100644 vendor/github.com/IBM/sarama/SECURITY.md rename vendor/github.com/{Shopify => IBM}/sarama/Vagrantfile (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_bindings.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_create_request.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_create_response.go (86%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_delete_request.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_delete_response.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_describe_request.go (82%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_describe_response.go (90%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_filter.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/acl_types.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/add_offsets_to_txn_request.go (80%) rename vendor/github.com/{Shopify => IBM}/sarama/add_offsets_to_txn_response.go (72%) rename vendor/github.com/{Shopify => IBM}/sarama/add_partitions_to_txn_request.go (83%) rename vendor/github.com/{Shopify => IBM}/sarama/add_partitions_to_txn_response.go (85%) rename vendor/github.com/{Shopify => IBM}/sarama/admin.go (88%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_client_quotas_request.go (97%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_client_quotas_response.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_configs_request.go 
(90%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_configs_response.go (78%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_partition_reassignments_request.go (96%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_partition_reassignments_response.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_user_scram_credentials_request.go (97%) rename vendor/github.com/{Shopify => IBM}/sarama/alter_user_scram_credentials_response.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/api_versions_request.go (89%) rename vendor/github.com/{Shopify => IBM}/sarama/api_versions_response.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/async_producer.go (97%) rename vendor/github.com/{Shopify => IBM}/sarama/balance_strategy.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/broker.go (90%) rename vendor/github.com/{Shopify => IBM}/sarama/client.go (85%) rename vendor/github.com/{Shopify => IBM}/sarama/compress.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/config.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/config_resource_type.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_group.go (85%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_group_members.go (73%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_metadata_request.go (75%) rename vendor/github.com/{Shopify => IBM}/sarama/consumer_metadata_response.go (86%) rename vendor/github.com/{Shopify => IBM}/sarama/control_record.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/crc32_field.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/create_partitions_request.go (90%) rename vendor/github.com/{Shopify => IBM}/sarama/create_partitions_response.go (86%) rename vendor/github.com/{Shopify => IBM}/sarama/create_topics_request.go (74%) rename vendor/github.com/{Shopify => IBM}/sarama/create_topics_response.go (78%) create mode 100644 vendor/github.com/IBM/sarama/decompress.go rename vendor/github.com/{Shopify => IBM}/sarama/delete_groups_request.go (71%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_groups_response.go (80%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_offsets_request.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_offsets_response.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_records_request.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_records_response.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_topics_request.go (84%) rename vendor/github.com/{Shopify => IBM}/sarama/delete_topics_response.go (84%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_client_quotas_request.go (96%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_client_quotas_response.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_configs_request.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_configs_response.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_groups_request.go (81%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_groups_response.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_log_dirs_request.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_log_dirs_response.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/describe_user_scram_credentials_request.go (94%) rename vendor/github.com/{Shopify => 
IBM}/sarama/describe_user_scram_credentials_response.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/dev.yml (100%) rename vendor/github.com/{Shopify => IBM}/sarama/docker-compose.yml (63%) rename vendor/github.com/{Shopify => IBM}/sarama/encoder_decoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/end_txn_request.go (80%) rename vendor/github.com/{Shopify => IBM}/sarama/end_txn_response.go (71%) create mode 100755 vendor/github.com/IBM/sarama/entrypoint.sh rename vendor/github.com/{Shopify => IBM}/sarama/errors.go (69%) rename vendor/github.com/{Shopify => IBM}/sarama/fetch_request.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/fetch_response.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/find_coordinator_request.go (90%) rename vendor/github.com/{Shopify => IBM}/sarama/find_coordinator_response.go (89%) rename vendor/github.com/{Shopify => IBM}/sarama/gssapi_kerberos.go (99%) rename vendor/github.com/{Shopify => IBM}/sarama/heartbeat_request.go (83%) rename vendor/github.com/{Shopify => IBM}/sarama/heartbeat_response.go (71%) rename vendor/github.com/{Shopify => IBM}/sarama/incremental_alter_configs_request.go (96%) rename vendor/github.com/{Shopify => IBM}/sarama/incremental_alter_configs_response.go (86%) rename vendor/github.com/{Shopify => IBM}/sarama/init_producer_id_request.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/init_producer_id_response.go (85%) rename vendor/github.com/{Shopify => IBM}/sarama/interceptors.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/join_group_request.go (70%) rename vendor/github.com/{Shopify => IBM}/sarama/join_group_response.go (68%) rename vendor/github.com/{Shopify => IBM}/sarama/kerberos_client.go (79%) rename vendor/github.com/{Shopify => IBM}/sarama/leave_group_request.go (88%) rename vendor/github.com/{Shopify => IBM}/sarama/leave_group_response.go (83%) rename vendor/github.com/{Shopify => IBM}/sarama/length_field.go (100%) create mode 100644 vendor/github.com/IBM/sarama/list_groups_request.go create mode 100644 vendor/github.com/IBM/sarama/list_groups_response.go rename vendor/github.com/{Shopify => IBM}/sarama/list_partition_reassignments_request.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/list_partition_reassignments_response.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/message.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/message_set.go (100%) create mode 100644 vendor/github.com/IBM/sarama/metadata_request.go create mode 100644 vendor/github.com/IBM/sarama/metadata_response.go rename vendor/github.com/{Shopify => IBM}/sarama/metrics.go (97%) rename vendor/github.com/{Shopify => IBM}/sarama/mockbroker.go (87%) rename vendor/github.com/{Shopify => IBM}/sarama/mockkerberos.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/mockresponses.go (90%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_commit_request.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_commit_response.go (87%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_fetch_request.go (73%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_fetch_response.go (94%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_manager.go (88%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_request.go (76%) rename vendor/github.com/{Shopify => IBM}/sarama/offset_response.go (73%) rename vendor/github.com/{Shopify => IBM}/sarama/packet_decoder.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/packet_encoder.go (100%) rename 
vendor/github.com/{Shopify => IBM}/sarama/partitioner.go (86%) rename vendor/github.com/{Shopify => IBM}/sarama/prep_encoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_request.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_response.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/produce_set.go (97%) rename vendor/github.com/{Shopify => IBM}/sarama/quota_types.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/real_decoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/real_encoder.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/record.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/record_batch.go (95%) rename vendor/github.com/{Shopify => IBM}/sarama/records.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/request.go (51%) rename vendor/github.com/{Shopify => IBM}/sarama/response_header.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sarama.go (67%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_authenticate_request.go (89%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_authenticate_response.go (92%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_handshake_request.go (78%) rename vendor/github.com/{Shopify => IBM}/sarama/sasl_handshake_response.go (77%) rename vendor/github.com/{Shopify => IBM}/sarama/scram_formatter.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sticky_assignor_user_data.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/sync_group_request.go (93%) rename vendor/github.com/{Shopify => IBM}/sarama/sync_group_response.go (77%) rename vendor/github.com/{Shopify => IBM}/sarama/sync_producer.go (98%) rename vendor/github.com/{Shopify => IBM}/sarama/timestamp.go (100%) rename vendor/github.com/{Shopify => IBM}/sarama/transaction_manager.go (91%) rename vendor/github.com/{Shopify => IBM}/sarama/txn_offset_commit_request.go (73%) rename vendor/github.com/{Shopify => IBM}/sarama/txn_offset_commit_response.go (80%) rename vendor/github.com/{Shopify => IBM}/sarama/utils.go (84%) rename vendor/github.com/{Shopify => IBM}/sarama/version.go (100%) create mode 100644 vendor/github.com/IBM/sarama/zstd.go delete mode 100644 vendor/github.com/Shopify/sarama/CHANGELOG.md delete mode 100644 vendor/github.com/Shopify/sarama/Dockerfile.kafka delete mode 100644 vendor/github.com/Shopify/sarama/decompress.go delete mode 100755 vendor/github.com/Shopify/sarama/entrypoint.sh delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go delete mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go delete mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go delete mode 100644 vendor/github.com/Shopify/sarama/zstd.go create mode 100644 vendor/github.com/coreos/go-oidc/v3/LICENSE create mode 100644 vendor/github.com/coreos/go-oidc/v3/NOTICE create mode 100644 vendor/github.com/coreos/go-oidc/v3/oidc/jose.go create mode 100644 vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go create mode 100644 vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go create mode 100644 vendor/github.com/coreos/go-oidc/v3/oidc/verify.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fold.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fuzz.go 
create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/indent.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/scanner.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tables.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tags.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/.gitignore create mode 100644 vendor/github.com/go-jose/go-jose/v3/.golangci.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/.travis.yml create mode 100644 vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v3/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/SECURITY.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/asymmetric.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/crypter.go rename vendor/{knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned => github.com/go-jose/go-jose/v3}/doc.go (55%) create mode 100644 vendor/github.com/go-jose/go-jose/v3/encoding.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/decode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/encode.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/indent.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/scanner.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/stream.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/json/tags.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwe.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwk.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jws.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/builder.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/claims.go rename vendor/{knative.dev/eventing-kafka-broker/control-plane/pkg/client/internals/kafka/clientset/versioned => github.com/go-jose/go-jose/v3/jwt}/doc.go (77%) create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/errors.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/jwt.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/jwt/validation.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/opaque.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/shared.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/signing.go create mode 100644 vendor/github.com/go-jose/go-jose/v3/symmetric.go rename vendor/github.com/google/{gnostic => gnostic-models}/LICENSE (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/context.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/error.go (100%) rename vendor/github.com/google/{gnostic => 
gnostic-models}/compiler/extensions.go (97%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/helpers.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/main.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/reader.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.proto (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extensions.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/base.go (90%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/display.go (92%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/models.go (97%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/operations.go (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/reader.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/schema.json (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/writer.go (92%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.proto (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/README.md (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/document.go (96%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/openapi-2.0.json (100%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.pb.go (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.proto (99%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/README.md (89%) rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/document.go (96%) delete mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.pb.go delete mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.proto delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json create mode 100644 vendor/github.com/gorilla/websocket/.editorconfig create mode 100644 vendor/github.com/gorilla/websocket/.golangci.yml delete mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/Makefile delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 
vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go create mode 100644 vendor/github.com/klauspost/compress/flate/level1.go create mode 100644 vendor/github.com/klauspost/compress/flate/level2.go create mode 100644 vendor/github.com/klauspost/compress/flate/level3.go create mode 100644 vendor/github.com/klauspost/compress/flate/level4.go create mode 100644 vendor/github.com/klauspost/compress/flate/level5.go create mode 100644 vendor/github.com/klauspost/compress/flate/level6.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go create mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go create mode 100644 vendor/github.com/klauspost/compress/flate/token.go create mode 100644 vendor/github.com/klauspost/compress/gzip/gunzip.go create mode 100644 vendor/github.com/klauspost/compress/gzip/gzip.go rename vendor/github.com/openshift/api/config/v1/{0000_10_config-operator_01_infrastructure.crd.yaml => 0000_10_config-operator_01_infrastructure-Default.crd.yaml} (78%) create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/config/v1/Makefile create mode 100644 vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml create mode 100644 
vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/Makefile create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/doc.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/register.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/types_insights.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/api/console/v1/0000_51_consoleplugin.crd.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/Makefile create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleclidownload.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleexternalloglink.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolelink.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolenotification.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleplugin.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolequickstart.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleyamlsample.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_plugin.go delete mode 100644 vendor/github.com/openshift/api/console/v1alpha1/0000_10_consoleplugin.crd.yaml create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/0000_51_consoleplugin.crd.yaml create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/Makefile create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/stable.consoleplugin.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/route/v1/Makefile create mode 100644 vendor/github.com/openshift/api/route/v1/route.crd.yaml create mode 100644 vendor/github.com/openshift/api/route/v1/route.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/route/v1/stable.route.testsuite.yaml create mode 100755 vendor/github.com/openshift/api/route/v1/test-route-validation.sh create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go 
create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/applicationmenuspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/clidownloadlink.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleclidownload.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleclidownloadspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleexternalloglink.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleexternalloglinkspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolelink.go create mode 100644 
vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolelinkspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolenotification.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolenotificationspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleplugin.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginbackend.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleplugini18n.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginproxy.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginproxyendpoint.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginproxyserviceconfig.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginservice.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolepluginspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolequickstart.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolequickstartspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolequickstarttask.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolequickstarttaskreview.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consolequickstarttasksummary.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleyamlsample.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/consoleyamlsamplespec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/link.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/console/v1/namespacedashboardspec.go create mode 100644 vendor/github.com/openshift/client-go/console/applyconfigurations/internal/internal.go create mode 100644 vendor/github.com/openshift/client-go/console/clientset/versioned/typed/console/v1/consoleplugin.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go create mode 100644 vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go create mode 100644 
vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go create mode 100644 vendor/github.com/rs/dnscache/.travis.yml create mode 100644 vendor/github.com/rs/dnscache/LICENSE create mode 100644 vendor/github.com/rs/dnscache/README.md create mode 100644 vendor/github.com/rs/dnscache/dnscache.go rename vendor/{k8s.io/client-go/third_party/forked/golang => golang.org/x/exp}/LICENSE (100%) rename vendor/{k8s.io/client-go/third_party/forked/golang => golang.org/x/exp}/PATENTS (100%) create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go delete mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go delete mode 100644 vendor/golang.org/x/tools/go/packages/doc.go delete mode 100644 vendor/golang.org/x/tools/go/packages/external.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go delete mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go delete mode 100644 vendor/golang.org/x/tools/go/packages/packages.go delete mode 100644 vendor/golang.org/x/tools/go/packages/visit.go delete mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bimport.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/exportdata.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iexport.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go117.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go delete mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/codes.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/decoder.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/doc.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/encoder.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/flags.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/reloc.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/support.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/sync.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go delete mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go delete mode 100644 
vendor/golang.org/x/tools/internal/typeparams/coretype.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types_118.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go create mode 100644 vendor/istio.io/api/security/v1/authorization_policy.pb.go create mode 100644 vendor/istio.io/api/security/v1/authorization_policy.pb.html create mode 100644 vendor/istio.io/api/security/v1/authorization_policy.proto create mode 100644 vendor/istio.io/api/security/v1/authorization_policy_deepcopy.gen.go create mode 100644 vendor/istio.io/api/security/v1/authorization_policy_json.gen.go create mode 100644 vendor/istio.io/api/security/v1/jwt.pb.go create mode 100644 vendor/istio.io/api/security/v1/jwt.pb.html create mode 100644 vendor/istio.io/api/security/v1/jwt.proto create mode 100644 vendor/istio.io/api/security/v1/jwt_deepcopy.gen.go create mode 100644 vendor/istio.io/api/security/v1/jwt_json.gen.go create mode 100644 vendor/istio.io/api/security/v1/request_authentication.pb.go create mode 100644 vendor/istio.io/api/security/v1/request_authentication.pb.html create mode 100644 vendor/istio.io/api/security/v1/request_authentication.proto create mode 100644 vendor/istio.io/api/security/v1/request_authentication_deepcopy.gen.go create mode 100644 vendor/istio.io/api/security/v1/request_authentication_json.gen.go create mode 100644 vendor/istio.io/client-go/pkg/apis/security/v1/doc.go create mode 100644 vendor/istio.io/client-go/pkg/apis/security/v1/register.gen.go create mode 100644 vendor/istio.io/client-go/pkg/apis/security/v1/types.gen.go create mode 100644 vendor/istio.io/client-go/pkg/apis/security/v1/zz_generated.deepcopy.gen.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/doc.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/register.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/types.go create mode 100644 
vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1/doc.go rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/generated.pb.go (86%) create mode 100644 vendor/k8s.io/api/flowcontrol/v1/generated.proto rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/register.go (95%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1}/types.go (75%) create mode 100644 vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1beta3}/doc.go (77%) create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1beta3}/generated.proto (79%) create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/register.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1beta3/types.go rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1beta3}/types_swagger_doc_generated.go (81%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1beta3}/zz_generated.deepcopy.go (92%) rename vendor/k8s.io/api/flowcontrol/{v1alpha1 => v1beta3}/zz_generated.prerelease-lifecycle.go (94%) create mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.proto create mode 100644 vendor/k8s.io/api/resource/v1alpha2/register.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/splice.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/dump/dump.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go 
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/delay.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/loop.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/poll.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/timer.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go rename vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/{rulewithoperations.go => namedrulewithoperations.go} (56%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedflexvolume.go => apps/v1beta1/statefulsetordinals.go} (50%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go create mode 
100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go rename vendor/k8s.io/client-go/applyconfigurations/{autoscaling/v2/podresourcemetricsource.go => core/v1/podresourceclaim.go} (51%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go rename vendor/k8s.io/client-go/applyconfigurations/{policy/v1beta1/allowedcsidriver.go => core/v1/resourceclaim.go} (65%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowdistinguishermethod.go (87%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschema.go (94%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschemaspec.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/flowschemastatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/groupsubject.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/limitedprioritylevelconfiguration.go (57%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/limitresponse.go (88%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/nonresourcepolicyrule.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/policyruleswithsubjects.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => 
v1}/prioritylevelconfiguration.go (94%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationreference.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationspec.go (73%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/prioritylevelconfigurationstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/queuingconfiguration.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/resourcepolicyrule.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/serviceaccountsubject.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/subject.go (92%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1}/usersubject.go (98%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go rename vendor/k8s.io/client-go/applyconfigurations/{policy/v1beta1/podsecuritypolicy.go => flowcontrol/v1beta3/flowschema.go} (66%) rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1beta3}/flowschemacondition.go (83%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/networkpolicystatus.go => flowcontrol/v1beta3/flowschemastatus.go} (60%) rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/allowedcsidriver.go => flowcontrol/v1beta3/groupsubject.go} (65%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go rename vendor/k8s.io/client-go/applyconfigurations/flowcontrol/{v1alpha1 => v1beta3}/prioritylevelconfigurationcondition.go (86%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go rename vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/{clustercidr.go => ipaddress.go} (69%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go rename vendor/k8s.io/client-go/applyconfigurations/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/servicecidr.go} (65%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go rename vendor/k8s.io/client-go/applyconfigurations/networking/{v1/networkpolicystatus.go => v1alpha1/servicecidrstatus.go} (68%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/discovery/aggregated_discovery.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/doc.go create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1}/interface.go (99%) create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1beta3}/flowschema.go (83%) create mode 100644 vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/interface.go rename vendor/k8s.io/client-go/informers/flowcontrol/{v1alpha1 => v1beta3}/prioritylevelconfiguration.go (81%) rename vendor/k8s.io/client-go/informers/{extensions/v1beta1/podsecuritypolicy.go => networking/v1alpha1/ipaddress.go} (50%) rename vendor/k8s.io/client-go/informers/networking/v1alpha1/{clustercidr.go => servicecidr.go} (69%) create mode 100644 vendor/k8s.io/client-go/informers/resource/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go rename vendor/k8s.io/client-go/informers/{policy/v1beta1/podsecuritypolicy.go => resource/v1alpha2/resourceclass.go} (51%) create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go rename 
vendor/k8s.io/client-go/kubernetes/typed/{flowcontrol => admissionregistration}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{flowcontrol => admissionregistration}/v1alpha1/fake/doc.go (100%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go rename vendor/{knative.dev/serving/pkg/client/clientset/versioned/typed/serving => k8s.io/client-go/kubernetes/typed/authentication}/v1alpha1/doc.go (94%) rename vendor/{knative.dev/networking/pkg/client/clientset/versioned => k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake}/doc.go (83%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go rename vendor/{knative.dev/serving/pkg/client/clientset/versioned/typed/serving => k8s.io/client-go/kubernetes/typed/authentication}/v1alpha1/generated_expansion.go (88%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go rename vendor/{knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go => k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go} (60%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go rename vendor/{knative.dev/operator/pkg/client/clientset/versioned => k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake}/doc.go (83%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go rename vendor/{knative.dev/serving/pkg/client/clientset/versioned/doc.go => k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go} (83%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/fake/fake_flowcontrol_client.go (72%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/flowcontrol_client.go (64%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1}/generated_expansion.go (97%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1beta3}/fake/fake_flowschema.go (71%) rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1beta3}/fake/fake_prioritylevelconfiguration.go (66%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1beta3}/flowschema.go (77%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go rename vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/{v1alpha1 => v1beta3}/prioritylevelconfiguration.go (74%) delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/expansion_generated.go (98%) rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/flowschema.go (79%) rename vendor/k8s.io/client-go/listers/flowcontrol/{v1alpha1 => v1}/prioritylevelconfiguration.go (79%) create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go create mode 100644 vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go rename vendor/k8s.io/client-go/listers/networking/v1alpha1/{clustercidr.go => servicecidr.go} (54%) delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/openapi/typeconverter.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go create mode 100644 
vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_stub.go delete mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go delete mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 vendor/k8s.io/client-go/tools/cache/object-names.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go create mode 100644 vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go create mode 100644 vendor/k8s.io/client-go/tools/internal/events/interfaces.go delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go create mode 100644 vendor/k8s.io/client-go/transport/cache_go118.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/doc.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/packages.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go create mode 100644 vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go create mode 100755 vendor/k8s.io/code-generator/kube_codegen.sh delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/cached/cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/markers.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/flags.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/serialization.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go create mode 100644 
vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go create mode 100644 vendor/k8s.io/utils/net/ipfamily.go create mode 100644 vendor/k8s.io/utils/ptr/README.md create mode 100644 vendor/k8s.io/utils/ptr/ptr.go create mode 100644 vendor/knative.dev/eventing-kafka-broker/test/rekt/features/ce_extensions.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/doc.go rename vendor/knative.dev/{serving/pkg/apis/serving/v1alpha1/domainmapping_defaults.go => eventing/pkg/apis/eventing/v1beta3/eventtype_conversion.go} (51%) create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_defaults.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_lifecycle.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_types.go create mode 100644 vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_validation.go rename vendor/knative.dev/{serving/pkg/apis/serving/v1alpha1 => eventing/pkg/apis/eventing/v1beta3}/register.go (75%) rename vendor/knative.dev/{serving/pkg/apis/serving/v1alpha1 => eventing/pkg/apis/eventing/v1beta3}/zz_generated.deepcopy.go (53%) create mode 100644 vendor/knative.dev/eventing/pkg/auth/audience.go create mode 100644 vendor/knative.dev/eventing/pkg/auth/serviceaccount.go create mode 100644 vendor/knative.dev/eventing/pkg/auth/token_provider.go create mode 100644 vendor/knative.dev/eventing/pkg/auth/token_verifier.go create mode 100644 vendor/knative.dev/eventing/pkg/auth/utils.go create mode 100644 
vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/doc.go create mode 100644 vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventing_client.go create mode 100644 vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventtype.go rename vendor/knative.dev/eventing/pkg/client/clientset/versioned/{ => typed/eventing/v1beta3/fake}/doc.go (88%) create mode 100644 vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/fake/fake_eventing_client.go create mode 100644 vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/fake/fake_eventtype.go create mode 100644 vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/generated_expansion.go create mode 100644 vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/eventtype.go create mode 100644 vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/interface.go create mode 100644 vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/eventtype.go create mode 100644 vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/expansion_generated.go create mode 100644 vendor/knative.dev/eventing/test/rekt/features/broker/oidc_feature.go create mode 100644 vendor/knative.dev/eventing/test/rekt/features/channel/oidc_feature.go create mode 100644 vendor/knative.dev/eventing/test/rekt/features/pingsource/oidc_feature.go rename vendor/knative.dev/hack/{hack.go => embed.go} (74%) delete mode 100644 vendor/knative.dev/hack/go.work.sum create mode 100644 vendor/knative.dev/networking/pkg/apis/networking/v1alpha1/ingress_helpers.go create mode 100644 vendor/knative.dev/pkg/apis/duck/v1/auth_types.go rename vendor/knative.dev/{hack => pkg/test/upgrade}/shell/executor.go (82%) rename vendor/knative.dev/{hack => pkg/test/upgrade}/shell/fail-example.sh (91%) rename vendor/knative.dev/{hack => pkg/test/upgrade}/shell/prefixer.go (97%) rename vendor/knative.dev/{hack => pkg/test/upgrade}/shell/project.go (91%) rename vendor/knative.dev/{hack => pkg/test/upgrade}/shell/types.go (81%) create mode 100644 vendor/knative.dev/reconciler-test/pkg/resources/serviceaccount/serviceaccount.go create mode 100644 vendor/knative.dev/reconciler-test/pkg/resources/serviceaccount/serviceaccount.yaml delete mode 100644 vendor/knative.dev/serving/AUTHORS delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/README.md delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error.go delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/doc.go delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/domainmapping_lifecycle.go delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/domainmapping_types.go delete mode 100644 vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/domainmapping_validation.go delete mode 100644 vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/domainmapping.go rename vendor/knative.dev/serving/test/{e2e-auto-tls-tests.sh => e2e-external-domain-tls-tests.sh} (50%) delete mode 100755 vendor/knative.dev/serving/test/e2e-internal-encryption-tests.sh create mode 100644 vendor/knative.dev/serving/test/upgrade/deployment_failure.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/delegating_by_gvk_cache.go delete mode 100644 
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go rename vendor/sigs.k8s.io/controller-runtime/pkg/client/{client_cache.go => client_rest_resources.go} (82%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go rename vendor/sigs.k8s.io/controller-runtime/pkg/{source/internal/eventsource.go => internal/source/event_handler.go} (67%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/leaderelection.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/server/server.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go diff --git a/go.mod b/go.mod index 0b815b1730..a196fc708e 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/manifestival/controller-runtime-client v0.4.0 github.com/manifestival/manifestival v0.7.2 github.com/openshift/api v3.9.0+incompatible - github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a + github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea github.com/openshift/machine-config-operator v0.0.1-0.20220201192635-14a1ca2cb91f github.com/operator-framework/api v0.22.0 github.com/operator-framework/operator-lifecycle-manager v0.25.0 @@ -28,33 +28,38 @@ require ( k8s.io/client-go v0.29.2 knative.dev/eventing v0.40.2 knative.dev/eventing-kafka-broker v0.37.0 - knative.dev/hack v0.0.0-20231123073118-c0f04e812cfe - knative.dev/networking v0.0.0-20231012062757-a5958051caf8 - knative.dev/operator v0.38.12 - knative.dev/pkg v0.0.0-20231103161548-f5b42e8dea44 - knative.dev/serving v0.38.5 + knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a + knative.dev/networking v0.0.0-20240116081125-ce0738abf051 + knative.dev/operator v0.40.3 + knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 + knative.dev/serving v0.39.1-0.20240123152112-7509f7d9b806 sigs.k8s.io/controller-runtime v0.17.2 sigs.k8s.io/yaml 
v1.4.0 ) require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/IBM/sarama v1.42.2 // indirect github.com/cloudevents/conformance v0.2.0 // indirect + github.com/coreos/go-oidc/v3 v3.9.0 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/gnostic v0.6.9 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect ) @@ -67,7 +72,6 @@ require ( contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect contrib.go.opencensus.io/exporter/zipkin v0.1.2 // indirect - github.com/Shopify/sarama v1.37.2 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blendle/zapdriver v1.3.1 // indirect @@ -80,7 +84,7 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -88,7 +92,7 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect - github.com/gobuffalo/flect v0.2.4 // indirect + github.com/gobuffalo/flect v1.0.2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -99,7 +103,7 @@ require ( github.com/google/uuid v1.6.0 github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -136,7 +140,7 @@ require ( github.com/rickb777/plural v1.2.2 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sirupsen/logrus v1.9.2 // indirect - github.com/tsenart/vegeta/v12 v12.8.4 // indirect + github.com/tsenart/vegeta/v12 v12.11.1 
// indirect github.com/wavesoftware/go-ensure v1.0.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect @@ -152,8 +156,8 @@ require ( golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect - gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect + golang.org/x/tools v0.17.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/api v0.155.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect @@ -161,8 +165,8 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 - istio.io/api v0.0.0-20220420164308-b6a03a9e477e // indirect - istio.io/client-go v1.13.3 // indirect + istio.io/api v0.0.0-20231206023236-e7cadb36da57 // indirect + istio.io/client-go v1.18.7 // indirect k8s.io/apiserver v0.29.2 // indirect k8s.io/code-generator v0.29.2 // indirect k8s.io/component-base v0.29.2 // indirect @@ -170,7 +174,7 @@ require ( k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240209001042-7a0d5b415232 // indirect k8s.io/utils v0.0.0-20240102154912-e7106e64919e - knative.dev/caching v0.0.0-20231023175240-c47a37c662c8 // indirect + knative.dev/caching v0.0.0-20240116080314-0a234c8b78ac // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) @@ -179,23 +183,23 @@ require ( github.com/go-logr/logr v1.4.1 go.uber.org/atomic v1.11.0 // indirect k8s.io/apiextensions-apiserver v0.29.2 - knative.dev/reconciler-test v0.0.0-20240206112133-c345dafdf302 + knative.dev/reconciler-test v0.0.0-20240319084821-8c063dd50b3c ) replace ( // Knative components - knative.dev/eventing => github.com/openshift-knative/eventing v0.99.1-0.20240304152419-a962e15b013e - knative.dev/eventing-kafka-broker => github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240216150930-ed42c15c5543 - knative.dev/hack => knative.dev/hack v0.0.0-20231123073118-c0f04e812cfe - knative.dev/networking => knative.dev/networking v0.0.0-20231023175057-21fb00ea6096 - knative.dev/pkg => knative.dev/pkg v0.0.0-20231103161548-f5b42e8dea44 - knative.dev/reconciler-test => knative.dev/reconciler-test v0.0.0-20240206112133-c345dafdf302 - knative.dev/serving => github.com/openshift-knative/serving v0.10.1-0.20240220063115-90e5f2c73b80 + knative.dev/eventing => github.com/openshift-knative/eventing v0.99.1-0.20240319111239-edb0e1086267 + knative.dev/eventing-kafka-broker => github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240322081612-ef01093f12e0 + knative.dev/hack => knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a + knative.dev/networking => knative.dev/networking v0.0.0-20240116081125-ce0738abf051 + knative.dev/pkg => knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 + knative.dev/reconciler-test => knative.dev/reconciler-test v0.0.0-20240319084821-8c063dd50b3c + knative.dev/serving => github.com/openshift-knative/serving v0.10.1-0.20240314151203-b3745c450c66 ) replace ( // OpenShift components - github.com/openshift/api => github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f - github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf - github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20231113175050-15d0b0288a43 + github.com/openshift/api => 
github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46 + github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c + github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20240321132456-9c607163d755 ) diff --git a/go.sum b/go.sum index d40600bfc9..dbd4ad59d6 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -13,18 +14,6 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -39,7 +28,6 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -60,68 +48,68 @@ contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9 contrib.go.opencensus.io/exporter/zipkin v0.1.2 
h1:YqE293IZrKtqPnpwDPH/lOqTWD/s3Iwabycam74JV3g= contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/IBM/sarama v1.42.2 h1:VoY4hVIZ+WQJ8G9KNY/SQlWguBQXQ9uvFPOnrcu8hEw= +github.com/IBM/sarama v1.42.2/go.mod h1:FLPGUGwYqEs62hq2bVG6Io2+5n+pS6s/WOXVKWSLFtE= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= -github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= -github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= -github.com/alecthomas/jsonschema v0.0.0-20180308105923-f2c93856175a/go.mod h1:qpebaTNSsyUn5rPSJMsfqEtDw71TTggXM6stUDI16HA= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock 
v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= -github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b h1:AP/Y7sqYicnjGDfD5VcY4CIfh1hRXBUavxrvELjTiOE= -github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf/aF3plwQ96laasTJgZi4f1aSOu+M= +github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -138,49 +126,39 @@ github.com/cloudevents/sdk-go/sql/v2 v2.15.2/go.mod h1:us+PSk8OXdk8pDbRfvxy5w8ub github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= +github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= -github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= -github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= -github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-gk v0.0.0-20140819190930-201884a44051/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= -github.com/dgryski/go-lttb v0.0.0-20180810165845-318fcdf10a77/go.mod h1:Va5MyIzkU0rAM92tn3hb3Anb7oz7KcnixF49+2wOMe4= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.5.0 h1:dRsaR00whmQD+SgVKlq/vCRFNgtEb5yppyeVos3Yce0= @@ -190,50 +168,49 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= 
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -247,48 +224,83 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod 
h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod 
h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/flect v0.2.4 h1:BSYA8+T60cdyq+vynaSUjqSVI9mDEg9ZfQUXKmfjo4I= -github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -302,8 +314,7 @@ github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -319,31 +330,18 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= -github.com/gonum/diff v0.0.0-20181124234638-500114f11e71/go.mod h1:22dM4PLscQl+Nzf64qNBurVJvfyvZELT0iRW2l/NN70= -github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= -github.com/gonum/integrate v0.0.0-20181209220457-a422b5c0fdf2/go.mod h1:pDgmNM6seYpwvPos3q+zxlXMsbve6mOIPucUnUOrI7Y= -github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= -github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= -github.com/gonum/mathext v0.0.0-20181121095525-8a4bf007ea55/go.mod h1:fmo8aiSEWkJeiGXUJf+sPvuDgEFgqIoZSs843ePKrGg= -github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= -github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4gD4vbwQxXXZ+WHmW6E9ixmNrwvs0iZs= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod 
h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -355,13 +353,13 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -369,11 +367,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -381,18 +376,13 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -400,30 +390,33 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux 
v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -433,40 +426,26 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 
github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/tdigest v0.0.0-20180711151920-a7d76c6f093a/go.mod h1:9GkyshztGufsdPQWjH+ifgnIr3xNUL5syI70g2dzU1o= github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY= github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/jaegertracing/jaeger v1.55.0 h1:IJHzKb2B9EYQyKlE7VSoKzNP3emHeqZWnWrKj+kYzzs= @@ -485,12 +464,13 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -498,7 +478,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= @@ -511,25 +490,26 @@ github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2e github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifestival/client-go-client v0.5.0 h1:LZUidASM6rwTEI40wtxYDKi+VHhDRnYT4xuYuLjExp4= @@ -542,25 +522,17 @@ github.com/manifestival/manifestival v0.7.2/go.mod h1:nl3T6HlfHCeidooWVTMI9vYNTB github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0/go.mod 
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -577,54 +549,41 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/openshift-knative/eventing v0.99.1-0.20240304152419-a962e15b013e h1:laOY7zl/Ex+iTdUc+jt026hfiRCR/CePm8gHPl8BGKo= -github.com/openshift-knative/eventing v0.99.1-0.20240304152419-a962e15b013e/go.mod h1:aLn2uc+KJCwpR6i2cjRb8Ox6H3+y6gtgtrZrGEF7Ybg= -github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240216150930-ed42c15c5543 h1:mnfenwWB3Q8rLvrc0M9Hshlp/bJf0YCEMXeO4Ud1tEo= -github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240216150930-ed42c15c5543/go.mod h1:2+sgPGEYjv3NFhM4f3djnUaPwpJHms/Wwjp7/vHx+lY= -github.com/openshift-knative/serving v0.10.1-0.20240220063115-90e5f2c73b80 h1:+TGiRsbyVXd5M3uPpD0+5jedeVkXflcphZxxOrHVZ9Y= -github.com/openshift-knative/serving v0.10.1-0.20240220063115-90e5f2c73b80/go.mod h1:5JIK94q75k2Y09CKpFRMe6Rs12bgCGv25wInPor/XCk= -github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f h1:v/UGegormU7y/1hMpt52McJtlBrsLgXpySOesXWFQVg= -github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= -github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf h1:gAYYPWVduONFJ6yuczLleApk0nEH3W0GgxDX2+O+B9E= -github.com/openshift/client-go v0.0.0-20220603133046-984ee5ebedcf/go.mod h1:eDO5QeVi2IiXmDwB0e2z1DpAznWroZKe978pzZwFBzg= -github.com/openshift/machine-config-operator v0.0.1-0.20231113175050-15d0b0288a43 h1:Ib9AjGHbrMO/sdxOtXQkjjlIhClO8+hTZPCDF8aV1+w= -github.com/openshift/machine-config-operator v0.0.1-0.20231113175050-15d0b0288a43/go.mod h1:1S0PgowzvwIOt6+dHbsAwcJSsL5+mKxqvXfn6Pzif3A= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openshift-knative/eventing v0.99.1-0.20240319111239-edb0e1086267 h1:OkfRruRFmlrn6p6Lug6X9FX/JZ38ZhUKY1mVAoUb+xE= +github.com/openshift-knative/eventing v0.99.1-0.20240319111239-edb0e1086267/go.mod h1:nXXNRgFkyua58NtqnHT0qHU6VUmB8EheZMqorFbn60o= +github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240322081612-ef01093f12e0 
h1:l3VxdlVkDIWXHExBHbWfYf+iPbRL0p43hi0PKt8n13o= +github.com/openshift-knative/eventing-kafka-broker v0.25.1-0.20240322081612-ef01093f12e0/go.mod h1:XrZV/DDcmhJ5ZqEz9ueICqifQu54zAQ9ctsVQnsyjtE= +github.com/openshift-knative/serving v0.10.1-0.20240314151203-b3745c450c66 h1:UdnrgJld3ZTkeXq7NbeCg04fdKLLgqoLL7Zzaz3HZMI= +github.com/openshift-knative/serving v0.10.1-0.20240314151203-b3745c450c66/go.mod h1:Ory3XczDB8b1lH757CSdeDeouY3LHzSamX8IjmStuoU= +github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46 h1:mnrBzHjjqYKw2uinOVXL9Eplj3+QaQwJ3SaWAs8l6cc= +github.com/openshift/api v0.0.0-20240103200955-7ca3a4634e46/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= +github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= +github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= +github.com/openshift/machine-config-operator v0.0.1-0.20240321132456-9c607163d755 h1:/rtfJC/Gl95bhoZJwEnox41G9rjwIXTgqIcbfLDckLU= +github.com/openshift/machine-config-operator v0.0.1-0.20240321132456-9c607163d755/go.mod h1:LbP4+qcTPzBcFDz9FyQ/mNfVkpp8YsAg1fjF3TysWZo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= @@ -634,7 +593,7 @@ github.com/operator-framework/api v0.22.0 h1:UZSn+iaQih4rCReezOnWTTJkMyawwV5iLnI github.com/operator-framework/api v0.22.0/go.mod h1:p/7YDbr+n4fmESfZ47yLAV1SvkfE6NU2aX8KhcfI0GA= github.com/operator-framework/operator-lifecycle-manager v0.25.0 h1:Y/ocKKQXxmxxNMH3xIbB0kRjicYIN9cN8ka/DUgjTGQ= github.com/operator-framework/operator-lifecycle-manager v0.25.0/go.mod h1:0DeNITwrneRQ7b5Qd6Dnp9+CpIBbv3F21RyncsK5ivU= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= @@ -649,11 +608,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= @@ -713,44 +670,40 @@ 
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= -github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= +github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d h1:X4+kt6zM/OVO6gbJdAfJR60MGPsqCzbtXNnjoGqdfAs= +github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -766,15 +719,17 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= -github.com/tsenart/vegeta/v12 v12.8.4 h1:UQ7tG7WkDorKj0wjx78Z4/vsMBP8RJQMGJqRVrkvngg= -github.com/tsenart/vegeta/v12 v12.8.4/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho= +github.com/tsenart/vegeta/v12 v12.11.1 h1:Rbwe7Zxr7sJ+BDTReemeQalYPvKiSV+O7nwmUs20B3E= +github.com/tsenart/vegeta/v12 v12.11.1/go.mod h1:swiFmrgpqj2llHURgHYFRFN0tfrIrlnspg01HjwOnSQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod 
h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/wavesoftware/go-ensure v1.0.0 h1:6X3gQL5psBWwtu/H9a+69xQ+JGTUELaLhgOB/iB3AQk= github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -783,102 +738,77 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= 
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -906,8 +836,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -916,32 +844,32 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -962,48 +890,26 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1016,29 +922,30 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1059,101 +966,89 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1174,42 +1069,28 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= -gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= @@ -1231,18 +1112,6 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA= google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1251,7 +1120,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1275,7 +1143,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1286,34 +1153,7 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= 
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= @@ -1323,6 +1163,7 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go. google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1333,23 +1174,9 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1362,7 +1189,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -1374,16 +1200,15 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1401,8 +1226,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1410,78 +1235,96 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -istio.io/api v0.0.0-20220413180505-1574de06b7bd/go.mod h1:8ZZgyVgYrHhsFQarEgTfPnMGpdgTDZbxSjYhdwTUuAQ= -istio.io/api v0.0.0-20220420164308-b6a03a9e477e h1:PiNQiNIgEpioHeslnETLn8yjVccRrYwg+Z56bVk7psE= -istio.io/api v0.0.0-20220420164308-b6a03a9e477e/go.mod h1:qGm6l1okCpLqVKyXSfYjB+UXXgpE/LCggALuK25cSFY= -istio.io/client-go v1.13.3 h1:xbEgTX4NRlvVRI/JsCmMI0ATvCc9P85HkQ20SphEGZ4= -istio.io/client-go v1.13.3/go.mod h1:DeT/l4yO+bwyv0ZgavSTj7BfkA2cTckHD0jtluwtXhE= -istio.io/gogo-genproto v0.0.0-20211208193508-5ab4acc9eb1e/go.mod h1:vJDAniIqryf/z///fgZqVPKJ7N2lBk7Gg8DCTB7oCfU= -k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= -k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +istio.io/api v0.0.0-20231206023236-e7cadb36da57 h1:yINFMnBRewB/CxEwnR8bfsAcdkqgM0OqG1XweO8ctuU= +istio.io/api v0.0.0-20231206023236-e7cadb36da57/go.mod h1:dDMe1TsOtrRoUlBzdxqNolWXpXPQjLfbcXvqPMtQ6eo= +istio.io/client-go v1.18.7 h1:ZFCnnDuEPCz7kYoTfpaF17+KNVMhlsGK/wA11px1zws= +istio.io/client-go v1.18.7/go.mod h1:MeI/KZUVp3jWyoXGSpFkiLYz012qpbQbjnVuYMcM9k4= +k8s.io/api v0.15.7/go.mod h1:a/tUxscL+UxvYyA7Tj5DRc8ivYqJIO1Y5KDdlI6wSvo= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.19.7/go.mod h1:KTryDUT3l6Mtv7K2J2486PNL9DBns3wOYTkGR+iz63Y= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.26.4 h1:rZccKdBLg9vP6J09JD+z8Yr99Ce8gk3Lbi9TCx05Jzs= -k8s.io/apimachinery v0.26.4/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= +k8s.io/apimachinery v0.15.7/go.mod h1:Xc10RHc1U+F/e9GCloJ8QAeCGevSVP5xhOhqlE+e1kM= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= 
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= -k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= -k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= -k8s.io/code-generator v0.25.4 h1:tjQ7/+9eN7UOiU2DP+0v4ntTI4JZLi2c1N0WllpFhTc= -k8s.io/code-generator v0.25.4/go.mod h1:9F5fuVZOMWRme7MYj2YT3L9ropPWPokd9VRhVyD3+0w= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= +k8s.io/client-go v0.15.7/go.mod h1:QMNB76d3lKPvPQdOOnnxUF693C3hnCzUbC2umg70pWA= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.19.7/go.mod h1:iytGI7S3kmv6bWnn+bSQUE4VlrEi4YFssvVB7J7Hvqg= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.29.2 h1:c9/iw2KnNpw2IRV+wwuG/Wns2TjPSgjWzbbjTevyiHI= +k8s.io/code-generator v0.29.2/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 
v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -knative.dev/caching v0.0.0-20231023175240-c47a37c662c8 h1:mQLrzyKfZ0eWVQDw21P0bx/xdmtcmhM4caVNyNiydFc= -knative.dev/caching v0.0.0-20231023175240-c47a37c662c8/go.mod h1:+2UlJE5sHZXxvJUSuMy6G+Hn20Lrqv/fQVy8ObO1lO4= -knative.dev/hack v0.0.0-20231123073118-c0f04e812cfe h1:8MMQg9UvxCLiOqWnWm6+kiYyV81Are8ocj7fX6qpgrk= -knative.dev/hack v0.0.0-20231123073118-c0f04e812cfe/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q= -knative.dev/networking v0.0.0-20231023175057-21fb00ea6096 h1:OpJxtpKqZFFpv6BneLUQRqzwi1nSEMfgpTWlzBKA5+k= -knative.dev/networking v0.0.0-20231023175057-21fb00ea6096/go.mod h1:OJpjGgNLAD1enDIhUXafQCwwMlPwLQlJuCFLpkz7w0g= -knative.dev/operator v0.38.12 h1:5Udirxqeb027Ug9IXnCsIAbX26P9OzHzEIP3X5ZcNps= -knative.dev/operator v0.38.12/go.mod h1:RzZBNRpNV/Ln2GOLqQH/pLlzJbrPZxowioF9TbfrgVk= -knative.dev/pkg v0.0.0-20231103161548-f5b42e8dea44 h1:2gjHbqg8K9k1KJtLgxsTvzxovXOhozcrk3AzzJmjsA0= -knative.dev/pkg v0.0.0-20231103161548-f5b42e8dea44/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0= -knative.dev/reconciler-test v0.0.0-20240206112133-c345dafdf302 h1:W8fEMNvbCl/Xi9FXf28wLpUQUCLZIr4k4uCPbMe+BxE= -knative.dev/reconciler-test v0.0.0-20240206112133-c345dafdf302/go.mod h1:Yw7Jkv+7PjDitG6CUkakWc/5SZa8Tm/sgXfaFy305Ng= -pgregory.net/rapid v0.3.3 h1:jCjBsY4ln4Atz78QoBWxUEvAHaFyNDQg9+WU62aCn1U= -pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20200204173128-addea2498afe/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20240209001042-7a0d5b415232 h1:MMq4iF9pHuAz/9dLnHwBQKEoeigXClzs3MFh/seyqtA= +k8s.io/kube-openapi v0.0.0-20240209001042-7a0d5b415232/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/caching v0.0.0-20240116080314-0a234c8b78ac h1:Bq8V741pYcScTD4T7Z3lRPsydBv6GdDQbTQHNzcQQYM= +knative.dev/caching v0.0.0-20240116080314-0a234c8b78ac/go.mod h1:/SKJWSvv3QAFnIDH5LtvYRlC1739NNrhCbb4zPkisy0= +knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a h1:+4Mdk0Lt3LGAVEI6vYyhfjBlVBx7sqS4wECtTkuXoSY= +knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a/go.mod 
h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q= +knative.dev/networking v0.0.0-20240116081125-ce0738abf051 h1:bTRVfwmfu4/7U1YBcgBl1VANAwmal6zkoAI9p7PQwDY= +knative.dev/networking v0.0.0-20240116081125-ce0738abf051/go.mod h1:rdzGL1OVP6VItEiJUN/FTCrDnIzkA6ykhSvaK+0Ne6o= +knative.dev/operator v0.40.3 h1:rTi1RgMlb/rneYNqopiZBHqiFSDiftX9F9fFPTD9fqI= +knative.dev/operator v0.40.3/go.mod h1:hdNO5Vd9+XwGLdT/3U3NJN4URT1UL+E8zBTxfh3qKCI= +knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 h1:H6+JJN23fhwYWCHY1339sY6uhIyoUwDy1a8dN233fdk= +knative.dev/pkg v0.0.0-20240116073220-b488e7be5902/go.mod h1:NYk8mMYoLkO7CQWnNkti4YGGnvLxN6MIDbUvtgeo0C0= +knative.dev/reconciler-test v0.0.0-20240319084821-8c063dd50b3c h1:WVonoeQO5d/OKTkUfbFOW/eStpuyzaTN+fgx0twCUAA= +knative.dev/reconciler-test v0.0.0-20240319084821-8c063dd50b3c/go.mod h1:PdI3uCI/8URA+hyBvWqZ2pwCIvX/4/nqCNsdW1cQauM= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/controller-runtime v0.7.2/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= +sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= 
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore similarity index 100% rename from vendor/github.com/Shopify/sarama/.gitignore rename to vendor/github.com/IBM/sarama/.gitignore diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml similarity index 78% rename from vendor/github.com/Shopify/sarama/.golangci.yml rename to vendor/github.com/IBM/sarama/.golangci.yml index 0b419abbfa..72e3e4c244 100644 --- a/vendor/github.com/Shopify/sarama/.golangci.yml +++ b/vendor/github.com/IBM/sarama/.golangci.yml @@ -19,61 +19,62 @@ linters-settings: misspell: locale: US goimports: - local-prefixes: github.com/Shopify/sarama + local-prefixes: github.com/IBM/sarama gocritic: enabled-tags: - diagnostic + - performance # - experimental # - opinionated - # - performance # - style + enabled-checks: + - importShadow + - nestingReduce + - stringsCompare + # - unnamedResult + # - whyNoLint disabled-checks: - assignOp - appendAssign - commentedOutCode + - hugeParam - ifElseChain - singleCaseSwitch - sloppyReassign - - wrapperFunc funlen: lines: 300 statements: 300 + depguard: + rules: + main: + deny: + - pkg: "io/ioutil" + desc: Use the "io" and "os" packages instead. + linters: disable-all: true enable: - bodyclose - - deadcode - depguard - exportloopref - dogsled - # - dupl - errcheck - errorlint - funlen - gochecknoinits - # - goconst - gocritic - gocyclo - gofmt - goimports - # - golint - gosec - # - gosimple - govet - # - ineffassign - misspell - # - nakedret - nilerr - # - paralleltest - # - scopelint - staticcheck - - structcheck - # - stylecheck - typecheck - unconvert - unused - - varcheck - whitespace issues: diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml new file mode 100644 index 0000000000..1869b8160e --- /dev/null +++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml @@ -0,0 +1,41 @@ +fail_fast: false +default_install_hook_types: [pre-commit, commit-msg] +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + - id: end-of-file-fixer + - id: fix-byte-order-marker + - id: mixed-line-ending + - id: trailing-whitespace + - repo: local + hooks: + - id: conventional-commit-msg-validation + name: commit message conventional validation + language: pygrep + entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)' + args: [--multiline, --negate] + stages: [commit-msg] + - id: commit-msg-needs-to-be-signed-off + name: commit message needs to be signed off + language: pygrep + entry: "^Signed-off-by:" + args: [--multiline, --negate] + stages: [commit-msg] + - id: gofmt + name: gofmt + description: Format files with gofmt. 
+ entry: gofmt -l + language: golang + files: \.go$ + args: [] + - repo: https://github.com/gitleaks/gitleaks + rev: v8.16.3 + hooks: + - id: gitleaks + - repo: https://github.com/golangci/golangci-lint + rev: v1.52.2 + hooks: + - id: golangci-lint diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md new file mode 100644 index 0000000000..513b76f91a --- /dev/null +++ b/vendor/github.com/IBM/sarama/CHANGELOG.md @@ -0,0 +1,1719 @@ +# Changelog + +## Version 1.42.1 (2023-11-07) + +## What's Changed +### :bug: Fixes +* fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705 +* fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1 + +## Version 1.42.0 (2023-11-02) + +## What's Changed +### :bug: Fixes +* Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693 +* Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698 +* fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678 +* Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700 +* fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701 +### :wrench: Maintenance +* chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685 +* chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692 +### :heavy_plus_sign: Other Changes +* chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683 +* fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689 +* chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691 + +## New Contributors +* @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693 +* @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0 + +## Version 1.41.3 (2023-10-17) + +## What's Changed +### :bug: Fixes +* fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663 +* fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672 +### :package: Dependency updates +* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661 +* chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671 +### :memo: Documentation +* fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657 + +## New Contributors +* @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657 +* @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3 + +## Version 1.41.2 (2023-09-12) + +## What's Changed +### :tada: New Features / Improvements +* perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646 +### :bug: Fixes +* fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636 +* 
fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642 +* fix: use least loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645 +### :package: Dependency updates +* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641 + +## New Contributors +* @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2 + +## Version 1.41.1 (2023-08-30) + +## What's Changed +### :bug: Fixes +* fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618 +* fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628 +* fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633 +* fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632 +### :wrench: Maintenance +* chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620 +### :memo: Documentation +* fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386 +* chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627 +### :heavy_plus_sign: Other Changes +* chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1 + +## Version 1.41.0 (2023-08-21) + +## What's Changed +### :rotating_light: Breaking Changes + +Note: this version of Sarama has had a big overhaul in its adherence to the use of the right Kafka protocol versions for the given Config Version. It has also bumped the default Version set in Config (where one is not supplied) to 2.1.0. This is in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or using it against Azure EventHubs then you will likely have to change your application code to pin to the appropriate Version. 
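
For applications consuming this vendored sarama against an older cluster, a minimal sketch of the version pinning the note above asks for — assuming a placeholder `localhost:9092` broker and choosing `sarama.V2_0_0_0` purely for illustration — could look like:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Pin the protocol version to what the target cluster actually runs,
	// rather than relying on sarama's (now newer) default.
	cfg.Version = sarama.V2_0_0_0

	// Placeholder broker address; replace with the real bootstrap servers.
	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	defer client.Close()
}
```
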
+ +* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572 +* chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574 +### :tada: New Features / Improvements +* Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156 +* feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536 +* feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538 +* fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544 +* Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541 +* fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546 +* fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548 +* chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549 +* chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550 +* chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551 +* fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553 +* fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554 +* fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555 +* fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556 +* fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558 +* fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565 +* fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568 +* feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566 +* fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570 +* feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573 +* feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475 +* feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600 +### :bug: Fixes +* fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528 +* fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532 +* extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533 +* Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545 +* fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563 +* bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560 +* fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569 +* fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594 +* fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595 +* fix(examples): sync exactly_once and consumergroup by 
@dnwe in https://github.com/IBM/sarama/pull/2614 +* fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616 +* fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609 +### :package: Dependency updates +* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542 +* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589 +* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587 +* chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586 +* chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588 +* chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598 +### :wrench: Maintenance +* fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531 +* chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537 +* Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543 +* fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552 +* chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557 +* fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571 +* chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576 +* chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575 +* chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578 +* chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580 +* chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579 +* chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581 +* chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591 +* feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592 +* chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593 +* feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601 +* fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603 +* fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612 +* fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613 +* chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615 +* feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605 +### :memo: Documentation +* fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479 +* chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599 + +## New Contributors +* @gebn made their first contribution in 
https://github.com/IBM/sarama/pull/2156 +* @prestona made their first contribution in https://github.com/IBM/sarama/pull/2543 +* @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479 +* @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560 +* @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475 +* @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0 + +## Version 1.40.1 (2023-07-27) + +## What's Changed +### :tada: New Features / Improvements +* Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484 +* feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457 +### :bug: Fixes +* Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517 +* fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519 +* Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520 +* fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522 +* fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525 +### :package: Dependency updates +* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509 +* chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513 +* chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512 +### :wrench: Maintenance +* chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496 +* chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504 +* Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514 +### :heavy_plus_sign: Other Changes +* chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497 +* chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508 +* fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523 + +## New Contributors +* @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504 +* @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514 +* @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484 +* @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1 + +## Version 1.40.0 (2023-07-17) + +## What's Changed + +Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461 + +### :rotating_light: Breaking Changes + +- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492 +- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494 + +### :bug: Fixes + +- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427 +- fix(metrics): fix race condition when calling 
Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428 +- fix: use version 4 of DescribeGroupsRequest only if kafka broker vers… …ion is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451 +- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447 +- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453 + +### :package: Dependency updates + +- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452 + +### :wrench: Maintenance + +- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434 +- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489 +- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485 + +## New Contributors + +- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452 +- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447 +- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0 + +## Version 1.38.1 (2023-01-22) + +## What's Changed +### :bug: Fixes +* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420 +* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410 +* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413 +* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411 +* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412 +* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414 +* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418 + +## New Contributors +* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420 +* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1 + +## Version 1.38.0 (2023-01-08) + +## What's Changed +### :tada: New Features / Improvements +* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375 +* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388 +* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373 +### :bug: Fixes +* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389 +* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385 +* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404 +* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387 +* fix(producer): return errors for 
every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378 +* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409 +### :package: Dependency updates +* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403 +### :wrench: Maintenance +* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390 +* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406 + +## New Contributors +* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385 +* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404 +* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387 +* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0 + +## Version 1.37.2 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356 +### :heavy_plus_sign: Other Changes +* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2 + +## Version 1.37.1 (2022-10-04) + +## What's Changed +### :bug: Fixes +* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352 +* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353 +* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355 + +## New Contributors +* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1 + +## Version 1.37.0 (2022-09-28) + +## What's Changed + +### :rotating_light: Breaking Changes +* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward, unfortunately due to an oversight this wasn't reflected in the go.mod declaration at time of release. + +### :tada: New Features / Improvements +* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339 +* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295 +* feat(mocks): support key in MockFetchResponse. 
by @Skandalik in https://github.com/IBM/sarama/pull/2328 +### :bug: Fixes +* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329 +* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317 +* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340 +* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331 +* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345 +* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327 +### :package: Dependency updates +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335 +* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333 +* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334 +* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348 +* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336 +* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350 +### :wrench: Maintenance +* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346 +* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347 + +## New Contributors +* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329 +* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317 +* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328 +* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340 +* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0 + +## Version 1.36.0 (2022-08-11) + +## What's Changed +### :tada: New Features / Improvements +* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252 +* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315 +* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299 +### :bug: Fixes +* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143 +### :package: Dependency updates +* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304 +* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301 +* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311 +* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307 +* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313 +* chore(deps): bump 
module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305 +* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302 +* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303 +### :wrench: Maintenance +* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300 +### :heavy_plus_sign: Other Changes +* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294 +* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297 +* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312 + +## New Contributors +* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294 +* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252 +* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0 + +## Version 1.35.0 (2022-07-22) + +## What's Changed +### :bug: Fixes +* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256 +* fix(balance): sort and de-deplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285 +* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269 +* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292 +### :package: Dependency updates +* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284 +* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283 +* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279 +* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281 +* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280 +### :wrench: Maintenance +* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272 +* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288 +* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289 +### :heavy_plus_sign: Other Changes +* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286 +* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287 +* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291 + +## New Contributors +* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0 + +## Version 1.34.1 (2022-06-07) + +## What's Changed +### :bug: Fixes +* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240 +* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247 +* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in 
https://github.com/IBM/sarama/pull/2248 +* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245 +### :wrench: Maintenance +* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236 +* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242 + +## New Contributors +* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240 +* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1 + +## Version 1.34.0 (2022-05-30) + +## What's Changed +### :tada: New Features / Improvements +* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230 +### :bug: Fixes +* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234 +### :wrench: Maintenance +* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231 +* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232 + +## New Contributors +* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0 + +## Version 1.33.0 (2022-05-11) + +## What's Changed +### :rotating_light: Breaking Changes + +**Note: with this change, the user of Sarama is required to use Go 1.13's errors.Is etc (rather then ==) when forming conditionals returned by this library.** +* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131 + + +### :tada: New Features / Improvements +* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172 +* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197 +* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191 +### :bug: Fixes +* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154 +* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144 +* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109 +* fix: remove "Is your cluster reachable?" 
from msg by @dnwe in https://github.com/IBM/sarama/pull/2165 +* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166 +* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164 +* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171 +* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185 +* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182 +* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203 +* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204 +* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208 +* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205 +* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194 +* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178 +* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213 +* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214 +* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227 +### :package: Dependency updates +* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170 +### :wrench: Maintenance +* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161 +* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162 +* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183 +* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210 +### :heavy_plus_sign: Other Changes +* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198 +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 +* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200 +* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226 + +## New Contributors +* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154 +* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171 +* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185 +* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172 +* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182 +* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178 +* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0 + +## Version 1.32.0 (2022-02-24) + +### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used. 
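
The v1.33.0 breaking-change note above asks callers to match returned errors with Go's `errors.Is` instead of `==`, because `ErrOutOfBrokers` now wraps the underlying connection error. A minimal sketch of that pattern (the broker address is a placeholder) might be:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// With wrapped errors, a plain == comparison no longer matches;
	// errors.Is walks the wrap chain and still does.
	_, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if errors.Is(err, sarama.ErrOutOfBrokers) {
		fmt.Println("no reachable brokers:", err)
	}
}
```
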
+ +* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199 + +--- + +## What's Changed +### :bug: Fixes +* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133 +### :package: Dependency updates +* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159 +### :wrench: Maintenance +* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130 +* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939 +* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138 +### :heavy_plus_sign: Other Changes +* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145 +* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146 +* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147 +* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160 + + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0 + +## Version 1.31.1 (2022-02-01) + +- #2126 - @bai - Populate missing kafka versions +- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image +- #2123 - @bai - Update klauspost/compress to 0.14 +- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy +- #2119 - @bai - Add Kafka 3.1.0 version number +- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption +- #2051 - @seveas - Expose the TLS connection state of a broker connection +- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys +- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup +- #2113 - @mosceo - Fix typo + +## Version 1.31.0 (2022-01-18) + +## What's Changed +### :tada: New Features / Improvements +* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088 +* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686 +* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094 +### :bug: Fixes +* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080 +* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081 +* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082 +* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096 +* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107 +* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108 +* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078 +* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111 +### :wrench: Maintenance +* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100 +### :memo: Documentation +* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099 +### :heavy_plus_sign: Other Changes +* 
Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084 + +## New Contributors +* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080 +* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088 +* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686 +* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0 + +## Version 1.30.1 (2021-12-04) + +## What's Changed +### :tada: New Features / Improvements +* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045 +### :bug: Fixes +* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048 +* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054 +* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057 +* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076 +### :wrench: Maintenance +* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046 +* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070 + +## Notes +* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x + +## New Contributors +* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048 +* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045 +* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054 +* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1 + +## Version 1.30.0 (2021-09-29) + +⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
+ +**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 + +--- + +ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** + +--- + +# New Features / Improvements + +- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh +- #2000 - @matzew - Using xdg-go module for SCRAM +- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures +- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM +- #2006 - @faillefer - Add support for DeleteOffsets operation +- #1909 - @agriffaut - KIP-546 Client quota APIs +- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state +- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger +- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log +- #2019 - @dnwe - feat: add logging & a metric for producer throttle +- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface +- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol +- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open +- #2034 - @bai - Add support for kafka 3.0.0 + +# Fixes + +- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest +- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation +- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls +- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true +- #2007 - @bai - Add support for Go 1.17 +- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks +- #2010 - @dnwe - chore: enable exportloopref and misspell linters +- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements +- #2015 - @bai - Change default branch to main +- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() +- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 +- #2016 - @dnwe - chore: replace deprecated Go calls +- #2017 - @dnwe - chore: delete legacy vagrant script +- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test +- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 +- #2033 - @bai - Update dependencies +- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method +- #2035 - @dnwe - chore: populate the missing kafka versions +- #2038 - @dnwe - feat: add a fuzzing workflow to github actions + +## New Contributors +* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983 +* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990 +* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988 +* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001 +* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003 +* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973 +* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992 +* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006 +* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718 +* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984 + +**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0 + +## Version 
1.29.1 (2021-06-24) + +# New Features / Improvements + +- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API +- #1964 - @ajanikow - Add DelegationToken ResourceType + +# Fixes + +- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire +- #1971 - @KerryJava - fix kafka-producer-performance throughput panic +- #1968 - @dnwe - chore: bump golang.org/x versions +- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers +- #1963 - @dnwe - fix: ensure backoff timer is re-used +- #1949 - @dnwe - fix: explicitly use uint64 for payload length + +## Version 1.29.0 (2021-05-07) + +### New Features / Improvements + +- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API +- #1869 - @wyndhblb - zstd: encode+decode performance improvements +- #1541 - @izolight - add String, (Un)MarshalText for acl types. +- #1921 - @bai - Add support for Kafka 2.8.0 + +### Fixes +- #1936 - @dnwe - fix(consumer): follow preferred broker +- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) +- #1926 - @dnwe - fix: correct initial CodeQL findings +- #1925 - @bai - Test out CodeQL +- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos +- #1922 - @bai - Update go dependencies +- #1898 - @mmaslankaprv - Parsing only known control batches value +- #1887 - @withshubh - Fix: issues affecting code quality + +## Version 1.28.0 (2021-02-15) + +**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.** + +- #1870 - @kvch - Update Kerberos library to latest major +- #1876 - @bai - Update docs, reference pkg.go.dev +- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close +- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages +- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies +- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy +- #1862 - @bai - Fix CI setenv permissions issues +- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev +- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica + +## Version 1.27.2 (2020-10-21) + +### Improvements + +#1750 - @krantideep95 Adds missing mock responses for mocking consumer group + +## Fixes + +#1817 - reverts #1785 - Add private method to Client interface to prevent implementation + +## Version 1.27.1 (2020-10-07) + +### Improvements + +#1775 - @d1egoaz - Adds a Producer Interceptor example +#1781 - @justin-chen - Refresh brokers given list of seed brokers +#1784 - @justin-chen - Add randomize seed broker method +#1790 - @d1egoaz - remove example binary +#1798 - @bai - Test against Go 1.15 +#1785 - @justin-chen - Add private method to Client interface to prevent implementation +#1802 - @uvw - Support Go 1.13 error unwrapping + +## Fixes + +#1791 - @stanislavkozlovski - bump default version to 1.0.0 + +## Version 1.27.0 (2020-08-11) + +### Improvements + +#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration +#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests +#1699 - @wclaeys - Consumer group support for manually comitting offsets +#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 +#1726 - @d1egoaz - Include zstd on the 
functional tests +#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors +#1738 - @varun06 - fixed variable names that are named same as some std lib package names +#1741 - @varun06 - updated zstd dependency to latest v1.10.10 +#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base +#1763 - @alrs - remove deprecated tls options from test +#1769 - @bai - Add support for Kafka 2.6.0 + +## Fixes + +#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication +#1744 - @alrs - Fix isBalanced Function Signature + +## Version 1.26.4 (2020-05-19) + +## Fixes + +- #1701 - @d1egoaz - Set server name only for the current broker +- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka + +## Version 1.26.3 (2020-05-07) + +## Fixes + +- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config + +## Version 1.26.2 (2020-05-06) + +## ⚠️ Known Issues + +This release has been marked as not ready for production and may be unstable, please use v1.26.4. + +### Improvements + +- #1560 - @iyacontrol - add sync pool for gzip 1-9 +- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID +- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs +- #1632 - @bai - Add support for Go 1.14 +- #1640 - @random-dwi - Feature/fix list partition reassignments +- #1646 - @mimaison - Add DescribeLogDirs to admin client +- #1667 - @bai - Add support for kafka 2.5.0 + +## Fixes + +- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 +- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine +- #1602 - @d1egoaz - adds a note about consumer groups Consume method +- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly +- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented +- #1614 - @alrs - produce_response.go: Remove Unused Functions +- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables +- #1639 - @agriffaut - Handle errors with no message but error code +- #1643 - @kzinglzy - fix `config.net.keepalive` +- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs +- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata +- #1650 - @lavoiesl - Return the response error in heartbeatLoop +- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die +- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. 
+ +## Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589)) + +## Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/IBM/sarama/pull/1574), + [1582](https://github.com/IBM/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/IBM/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/IBM/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/IBM/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/IBM/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/IBM/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/IBM/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/IBM/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/IBM/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/IBM/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/IBM/sarama/pull/1586)). + +## Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/IBM/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/IBM/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/IBM/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/IBM/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/IBM/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/IBM/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/IBM/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/IBM/sarama/pull/1545)). + +## Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/IBM/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/IBM/sarama/pull/1518)). +- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/IBM/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/IBM/sarama/pull/1529)). + +## Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/IBM/sarama/pull/1416)). 
+- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/IBM/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/IBM/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/IBM/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/IBM/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/IBM/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/IBM/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/IBM/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/IBM/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/IBM/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/IBM/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/IBM/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/IBM/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/IBM/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/IBM/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/IBM/sarama/issues/1252 + +## Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete bug record + ([1425](https://github.com/IBM/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/IBM/sarama/pull/1428)). + +## Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/IBM/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/IBM/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/IBM/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/IBM/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/IBM/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/IBM/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/IBM/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/IBM/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/IBM/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/IBM/sarama/pull/1373)). +- Support timeout when fetching metadata + ([1359](https://github.com/IBM/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/IBM/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/IBM/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/IBM/sarama/pull/1368)). + +## Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/IBM/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/IBM/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/IBM/sarama/pull/1353)). 
+- Use a different SCRAM client for each broker connection + ([1349](https://github.com/IBM/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/IBM/sarama/pull/1344)). + +## Version 1.22.0 (2019-04-09) + +New Features: +- Add Offline Replicas Operation to Client + ([1318](https://github.com/IBM/sarama/pull/1318)). +- Allow using proxy when connecting to broker + ([1326](https://github.com/IBM/sarama/pull/1326)). +- Implement ReadCommitted + ([1307](https://github.com/IBM/sarama/pull/1307)). +- Add support for Kafka 2.2.0 + ([1331](https://github.com/IBM/sarama/pull/1331)). +- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes + ([1331](https://github.com/IBM/sarama/pull/1295)). + +Improvements: +- Unregister all broker metrics on broker stop + ([1232](https://github.com/IBM/sarama/pull/1232)). +- Add SCRAM authentication example + ([1303](https://github.com/IBM/sarama/pull/1303)). +- Add consumergroup examples + ([1304](https://github.com/IBM/sarama/pull/1304)). +- Expose consumer batch size metric + ([1296](https://github.com/IBM/sarama/pull/1296)). +- Add TLS options to console producer and consumer + ([1300](https://github.com/IBM/sarama/pull/1300)). +- Reduce client close bookkeeping + ([1297](https://github.com/IBM/sarama/pull/1297)). +- Satisfy error interface in create responses + ([1154](https://github.com/IBM/sarama/pull/1154)). +- Please lint gods + ([1346](https://github.com/IBM/sarama/pull/1346)). + +Bug Fixes: +- Fix multi consumer group instance crash + ([1338](https://github.com/IBM/sarama/pull/1338)). +- Update lz4 to latest version + ([1347](https://github.com/IBM/sarama/pull/1347)). +- Retry ErrNotCoordinatorForConsumer in new consumergroup session + ([1231](https://github.com/IBM/sarama/pull/1231)). +- Fix cleanup error handler + ([1332](https://github.com/IBM/sarama/pull/1332)). +- Fix rate condition in PartitionConsumer + ([1156](https://github.com/IBM/sarama/pull/1156)). + +## Version 1.21.0 (2019-02-24) + +New Features: +- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest + ([1236](https://github.com/IBM/sarama/pull/1236)). +- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/IBM/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/IBM/sarama/pull/1240)). + +Improvements: +- Add Go mod support + ([1282](https://github.com/IBM/sarama/pull/1282)). +- Add error codes 73—76 + ([1239](https://github.com/IBM/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/IBM/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/IBM/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/IBM/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/IBM/sarama/pull/1222)). +- Add support LogAppend timestamps + ([1258](https://github.com/IBM/sarama/pull/1258)). + +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/IBM/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/IBM/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/IBM/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/IBM/sarama/pull/1273)). 
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the + expected topic/partition blocks + ([#1086](https://github.com/IBM/sarama/pull/1086)). + - Fix consumer block when response contains only constrol messages + ([#1115](https://github.com/IBM/sarama/pull/1115)). + - Add timeout config for ClusterAdmin requests + ([#1142](https://github.com/IBM/sarama/pull/1142)). + - Add version check when producing message with headers + ([#1117](https://github.com/IBM/sarama/pull/1117)). + - Fix `MetadataRequest` for empty list of topics + ([#1132](https://github.com/IBM/sarama/pull/1132)). + - Fix producer topic metadata on-demand fetch when topic error happens in metadata response + ([#1125](https://github.com/IBM/sarama/pull/1125)). + +## Version 1.17.0 (2018-05-30) + +New Features: + - Add support for gzip compression levels + ([#1044](https://github.com/IBM/sarama/pull/1044)). + - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/IBM/sarama/pull/1047), + [#1069](https://github.com/IBM/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/IBM/sarama/pull/1098)) + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/IBM/sarama/pull/1065), + [#1096](https://github.com/IBM/sarama/pull/1096), + [#1027](https://github.com/IBM/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/IBM/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/IBM/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/IBM/sarama/pull/1049)). + - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/IBM/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/IBM/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/IBM/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/IBM/sarama/pull/1050), + [#1051](https://github.com/IBM/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/IBM/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/IBM/sarama/pull/1092)). + +## Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/IBM/sarama/pull/1007), + [#1008](https://github.com/IBM/sarama/pull/1008)). + - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/IBM/sarama/pull/1009)). + - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/IBM/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/IBM/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/IBM/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/IBM/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/IBM/sarama/pull/1028)). 
+ - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/IBM/sarama/pull/1002), + [#1015](https://github.com/IBM/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/IBM/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/IBM/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/IBM/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/IBM/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/IBM/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/IBM/sarama/pull/1035)). + +## Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/IBM/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/IBM/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/IBM/sarama/pull/985)). + +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/IBM/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/IBM/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/IBM/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/IBM/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/IBM/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/IBM/sarama/pull/991)). + +## Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/IBM/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/IBM/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/IBM/sarama/pull/975)). + +## Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/IBM/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/IBM/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/IBM/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/IBM/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/IBM/sarama/issues/885)). 
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/IBM/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/IBM/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/IBM/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/IBM/sarama/pull/940)). + +## Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/IBM/sarama/pull/813)). + - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/IBM/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/IBM/sarama/pull/837), + [#841](https://github.com/IBM/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/IBM/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/IBM/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/IBM/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/IBM/sarama/pull/859)). + +## Version 1.11.0 (2016-12-20) + +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + +New Features: + - Metrics! Thanks to Sébastien Launay for all his work on this feature + ([#701](https://github.com/IBM/sarama/pull/701), + [#746](https://github.com/IBM/sarama/pull/746), + [#766](https://github.com/IBM/sarama/pull/766)). + - Add support for LZ4 compression + ([#786](https://github.com/IBM/sarama/pull/786)). + - Add support for ListOffsetRequest v1 and Kafka 0.10.1 + ([#775](https://github.com/IBM/sarama/pull/775)). + - Added a `HighWaterMarks` method to the Consumer which aggregates the + `HighWaterMarkOffset` values of its child topic/partitions + ([#769](https://github.com/IBM/sarama/pull/769)). + +Bug Fixes: + - Fixed producing when using timestamps, compression and Kafka 0.10 + ([#759](https://github.com/IBM/sarama/pull/759)). + - Added missing decoder methods to DescribeGroups response + ([#756](https://github.com/IBM/sarama/pull/756)). 
+ - Fix producer shutdown when `Return.Errors` is disabled + ([#787](https://github.com/IBM/sarama/pull/787)). + - Don't mutate configuration in SyncProducer + ([#790](https://github.com/IBM/sarama/pull/790)). + - Fix crash on SASL initialization failure + ([#795](https://github.com/IBM/sarama/pull/795)). + +## Version 1.10.1 (2016-08-30) + +Bug Fixes: + - Fix the documentation for `HashPartitioner` which was incorrect + ([#717](https://github.com/IBM/sarama/pull/717)). + - Permit client creation even when it is limited by ACLs + ([#722](https://github.com/IBM/sarama/pull/722)). + - Several fixes to the consumer timer optimization code, regressions introduced + in v1.10.0. Go's timers are finicky + ([#730](https://github.com/IBM/sarama/pull/730), + [#733](https://github.com/IBM/sarama/pull/733), + [#734](https://github.com/IBM/sarama/pull/734)). + - Handle consuming compressed relative offsets with Kafka 0.10 + ([#735](https://github.com/IBM/sarama/pull/735)). + +## Version 1.10.0 (2016-08-02) + +_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of +Kafka you are running against (via the `config.Version` value) in order to use +features that may not be compatible with old Kafka versions. If you don't +specify this value it will default to 0.8.2 (the minimum supported), and trying +to use more recent features (like the offset manager) will fail with an error. + +_Also:_ The offset-manager's behaviour has been changed to match the upstream +java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and +[#713](https://github.com/IBM/sarama/pull/713)). If you use the +offset-manager, please ensure that you are committing one *greater* than the +last consumed message offset or else you may end up consuming duplicate +messages. + +New Features: + - Support for Kafka 0.10 + ([#672](https://github.com/IBM/sarama/pull/672), + [#678](https://github.com/IBM/sarama/pull/678), + [#681](https://github.com/IBM/sarama/pull/681), and others). + - Support for configuring the target Kafka version + ([#676](https://github.com/IBM/sarama/pull/676)). + - Batch producing support in the SyncProducer + ([#677](https://github.com/IBM/sarama/pull/677)). + - Extend producer mock to allow setting expectations on message contents + ([#667](https://github.com/IBM/sarama/pull/667)). + +Improvements: + - Support `nil` compressed messages for deleting in compacted topics + ([#634](https://github.com/IBM/sarama/pull/634)). + - Pre-allocate decoding errors, greatly reducing heap usage and GC time against + misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)). + - Re-use consumer expiry timers, removing one allocation per consumed message + ([#707](https://github.com/IBM/sarama/pull/707)). + +Bug Fixes: + - Actually default the client ID to "sarama" like we say we do + ([#664](https://github.com/IBM/sarama/pull/664)). + - Fix a rare issue where `Client.Leader` could return the wrong error + ([#685](https://github.com/IBM/sarama/pull/685)). + - Fix a possible tight loop in the consumer + ([#693](https://github.com/IBM/sarama/pull/693)). + - Match upstream's offset-tracking behaviour + ([#705](https://github.com/IBM/sarama/pull/705)). + - Report UnknownTopicOrPartition errors from the offset manager + ([#706](https://github.com/IBM/sarama/pull/706)). + - Fix possible negative partition value from the HashPartitioner + ([#709](https://github.com/IBM/sarama/pull/709)). 
+ +## Version 1.9.0 (2016-05-16) + +New Features: + - Add support for custom offset manager retention durations + ([#602](https://github.com/IBM/sarama/pull/602)). + - Publish low-level mocks to enable testing of third-party producer/consumer + implementations ([#570](https://github.com/IBM/sarama/pull/570)). + - Declare support for Golang 1.6 + ([#611](https://github.com/IBM/sarama/pull/611)). + - Support for SASL plain-text auth + ([#648](https://github.com/IBM/sarama/pull/648)). + +Improvements: + - Simplified broker locking scheme slightly + ([#604](https://github.com/IBM/sarama/pull/604)). + - Documentation cleanup + ([#605](https://github.com/IBM/sarama/pull/605), + [#621](https://github.com/IBM/sarama/pull/621), + [#654](https://github.com/IBM/sarama/pull/654)). + +Bug Fixes: + - Fix race condition shutting down the OffsetManager + ([#658](https://github.com/IBM/sarama/pull/658)). + +## Version 1.8.0 (2016-02-01) + +New Features: + - Full support for Kafka 0.9: + - All protocol messages and fields + ([#586](https://github.com/IBM/sarama/pull/586), + [#588](https://github.com/IBM/sarama/pull/588), + [#590](https://github.com/IBM/sarama/pull/590)). + - Verified that TLS support works + ([#581](https://github.com/IBM/sarama/pull/581)). + - Fixed the OffsetManager compatibility + ([#585](https://github.com/IBM/sarama/pull/585)). + +Improvements: + - Optimize for fewer system calls when reading from the network + ([#584](https://github.com/IBM/sarama/pull/584)). + - Automatically retry `InvalidMessage` errors to match upstream behaviour + ([#589](https://github.com/IBM/sarama/pull/589)). + +## Version 1.7.0 (2015-12-11) + +New Features: + - Preliminary support for Kafka 0.9 + ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several + caveats: + - Protocol-layer support is mostly in place + ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9 + renamed some messages and fields, which we did not in order to preserve API + compatibility. + - The producer and consumer work against 0.9, but the offset manager does + not ([#573](https://github.com/IBM/sarama/pull/573)). + - TLS support may or may not work + ([#581](https://github.com/IBM/sarama/pull/581)). + +Improvements: + - Don't wait for request timeouts on dead brokers, greatly speeding recovery + when the TCP connection is left hanging + ([#548](https://github.com/IBM/sarama/pull/548)). + - Refactored part of the producer. The new version provides a much more elegant + solution to [#449](https://github.com/IBM/sarama/pull/449). It is also + slightly more efficient, and much more precise in calculating batch sizes + when compression is used + ([#549](https://github.com/IBM/sarama/pull/549), + [#550](https://github.com/IBM/sarama/pull/550), + [#551](https://github.com/IBM/sarama/pull/551)). + +Bug Fixes: + - Fix race condition in consumer test mock + ([#553](https://github.com/IBM/sarama/pull/553)). + +## Version 1.6.1 (2015-09-25) + +Bug Fixes: + - Fix panic that could occur if a user-supplied message value failed to encode + ([#449](https://github.com/IBM/sarama/pull/449)). + +## Version 1.6.0 (2015-09-04) + +New Features: + - Implementation of a consumer offset manager using the APIs introduced in + Kafka 0.8.2. The API is designed mainly for integration into a future + high-level consumer, not for direct use, although it is *possible* to use it + directly. + ([#461](https://github.com/IBM/sarama/pull/461)). 
+ +Improvements: + - CRC32 calculation is much faster on machines with SSE4.2 instructions, + removing a major hotspot from most profiles + ([#255](https://github.com/IBM/sarama/pull/255)). + +Bug Fixes: + - Make protocol decoding more robust against some malformed packets generated + by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523), + [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways + ([#528](https://github.com/IBM/sarama/pull/528)). + - Fix a potential race condition panic in the consumer on shutdown + ([#529](https://github.com/IBM/sarama/pull/529)). + +## Version 1.5.0 (2015-08-17) + +New Features: + - TLS-encrypted network connections are now supported. This feature is subject + to change when Kafka releases built-in TLS support, but for now this is + enough to work with TLS-terminating proxies + ([#154](https://github.com/IBM/sarama/pull/154)). + +Improvements: + - The consumer will not block if a single partition is not drained by the user; + all other partitions will continue to consume normally + ([#485](https://github.com/IBM/sarama/pull/485)). + - Formatting of error strings has been much improved + ([#495](https://github.com/IBM/sarama/pull/495)). + - Internal refactoring of the producer for code cleanliness and to enable + future work ([#300](https://github.com/IBM/sarama/pull/300)). + +Bug Fixes: + - Fix a potential deadlock in the consumer on shutdown + ([#475](https://github.com/IBM/sarama/pull/475)). + +## Version 1.4.3 (2015-07-21) + +Bug Fixes: + - Don't include the partitioner in the producer's "fetch partitions" + circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)). + - Don't retry messages until the broker is closed when abandoning a broker in + the producer ([#468](https://github.com/IBM/sarama/pull/468)). + - Update the import path for snappy-go, it has moved again and the API has + changed slightly ([#486](https://github.com/IBM/sarama/pull/486)). + +## Version 1.4.2 (2015-05-27) + +Bug Fixes: + - Update the import path for snappy-go, it has moved from google code to github + ([#456](https://github.com/IBM/sarama/pull/456)). + +## Version 1.4.1 (2015-05-25) + +Improvements: + - Optimizations when decoding snappy messages, thanks to John Potocny + ([#446](https://github.com/IBM/sarama/pull/446)). + +Bug Fixes: + - Fix hypothetical race conditions on producer shutdown + ([#450](https://github.com/IBM/sarama/pull/450), + [#451](https://github.com/IBM/sarama/pull/451)). + +## Version 1.4.0 (2015-05-01) + +New Features: + - The consumer now implements `Topics()` and `Partitions()` methods to enable + users to dynamically choose what topics/partitions to consume without + instantiating a full client + ([#431](https://github.com/IBM/sarama/pull/431)). + - The partition-consumer now exposes the high water mark offset value returned + by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)). + - Added a `kafka-console-consumer` tool capable of handling multiple + partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` + ([#439](https://github.com/IBM/sarama/pull/439), + [#442](https://github.com/IBM/sarama/pull/442)). + +Improvements: + - The producer's logging during retry scenarios is more consistent, more + useful, and slightly less verbose + ([#429](https://github.com/IBM/sarama/pull/429)). 
+ - The client now shuffles its initial list of seed brokers in order to prevent + thundering herd on the first broker in the list + ([#441](https://github.com/IBM/sarama/pull/441)). + +Bug Fixes: + - The producer now correctly manages its state if retries occur when it is + shutting down, fixing several instances of confusing behaviour and at least + one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)). + - The consumer now handles messages for different partitions asynchronously, + making it much more resilient to specific user code ordering + ([#325](https://github.com/IBM/sarama/pull/325)). + +## Version 1.3.0 (2015-04-16) + +New Features: + - The client now tracks consumer group coordinators using + ConsumerMetadataRequests similar to how it tracks partition leadership using + regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)). + This adds two methods to the client API: + - `Coordinator(consumerGroup string) (*Broker, error)` + - `RefreshCoordinator(consumerGroup string) error` + +Improvements: + - ConsumerMetadataResponses now automatically create a Broker object out of the + ID/address/port combination for the Coordinator; accessing the fields + individually has been deprecated + ([#413](https://github.com/IBM/sarama/pull/413)). + - Much improved handling of `OffsetOutOfRange` errors in the consumer. + Consumers will fail to start if the provided offset is out of range + ([#418](https://github.com/IBM/sarama/pull/418)) + and they will automatically shut down if the offset falls out of range + ([#424](https://github.com/IBM/sarama/pull/424)). + - Small performance improvement in encoding and decoding protocol messages + ([#427](https://github.com/IBM/sarama/pull/427)). + +Bug Fixes: + - Fix a rare race condition in the client's background metadata refresher if + it happens to be activated while the client is being closed + ([#422](https://github.com/IBM/sarama/pull/422)). + +## Version 1.2.0 (2015-04-07) + +Improvements: + - The producer's behaviour when `Flush.Frequency` is set is now more intuitive + ([#389](https://github.com/IBM/sarama/pull/389)). + - The producer is now somewhat more memory-efficient during and after retrying + messages due to an improved queue implementation + ([#396](https://github.com/IBM/sarama/pull/396)). + - The consumer produces much more useful logging output when leadership + changes ([#385](https://github.com/IBM/sarama/pull/385)). + - The client's `GetOffset` method will now automatically refresh metadata and + retry once in the event of stale information or similar + ([#394](https://github.com/IBM/sarama/pull/394)). + - Broker connections now have support for using TCP keepalives + ([#407](https://github.com/IBM/sarama/issues/407)). + +Bug Fixes: + - The OffsetCommitRequest message now correctly implements all three possible + API versions ([#390](https://github.com/IBM/sarama/pull/390), + [#400](https://github.com/IBM/sarama/pull/400)). + +## Version 1.1.0 (2015-03-20) + +Improvements: + - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly + broken topics don't choke throughput + ([#373](https://github.com/IBM/sarama/pull/373)). + +Bug Fixes: + - Fix the producer's internal reference counting in certain unusual scenarios + ([#367](https://github.com/IBM/sarama/pull/367)). + - Fix the consumer's internal reference counting in certain unusual scenarios + ([#369](https://github.com/IBM/sarama/pull/369)). 
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..8470ec5ce9
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+  overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+ +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +dominic.evans@uk.ibm.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md new file mode 100644 index 0000000000..bb88127c0e --- /dev/null +++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +[fork]: https://github.com/IBM/sarama/fork +[pr]: https://github.com/IBM/sarama/compare +[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license + +Hi there! We are thrilled that you would like to contribute to Sarama. +Contributions are always welcome, both reporting issues and submitting pull requests! + +## Reporting issues + +Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth. + +- What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please try if the problem persists with the latest version. +- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description. +- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it. + +Also, please include the following information about your environment, so we can help you faster: + +- What version of Kafka are you using? +- What version of Go are you using? +- What are the values of your Producer/Consumer/Client configuration? + + +## Contributing a change + +Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md). +By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). +The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes. + +Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author: + +``` +feat: this is my commit message + +Signed-off-by: Random J Developer +``` + +Git even has a `-s` command line option to append this automatically to your +commit message: + +``` +$ git commit -s -m 'This is my commit message' +``` + +Because this library is in production use by many people and applications, we code review all additions. +To make the review process go as smooth as possible, please consider the following. + +- If you plan to work on something major, please open an issue to discuss the design first. +- Don't break backwards compatibility. If you really have to, open an issue to discuss this first. +- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving. +- Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs. +- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors. +- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems. +- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions. +- Make sure your code is supported by all the Go versions we support. 
+ You can rely on GitHub Actions for testing older Go versions. + +## Submitting a pull request + +0. [Fork][fork] and clone the repository +1. Create a new branch: `git checkout -b my-branch-name` +2. Make your change, push to your fork and [submit a pull request][pr] +3. Wait for your pull request to be reviewed and merged. + +Here are a few things you can do that will increase the likelihood of your pull request being accepted: + +- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. +- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + +## Further Reading + +- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/) +- [The most powerful contributor agreement](https://lwn.net/Articles/592503/) +- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/) +- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/) +- [GitHub Help](https://help.github.com) diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka new file mode 100644 index 0000000000..186c2eb186 --- /dev/null +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -0,0 +1,47 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59 + +USER root + +RUN microdnf update -y \ + && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf reinstall -y tzdata \ + && microdnf clean all + +ENV JAVA_HOME=/usr/lib/jvm/jre-11 + +# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html +# Ensure Java doesn't cache any dns results +RUN cd /etc/java/java-11-openjdk/*/conf/security \ + && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ + && echo 'networkaddress.cache.ttl=0' >> java.security \ + && echo 'networkaddress.cache.negative.ttl=0' >> java.security + +ARG SCALA_VERSION="2.13" +ARG KAFKA_VERSION="3.6.0" + +# https://github.com/apache/kafka/blob/9989b68d0d38c8f1357f78bf9d53a58c1476188d/tests/docker/Dockerfile#L46-L72 +ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ + && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ + && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" + +# older kafka versions depend upon jaxb-api being bundled with the JDK, but it +# was removed from Java 11 so work around that by including it in the kafka +# libs dir regardless +WORKDIR /tmp +RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ + && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ + && rm -f jaxb-api-2.3.0.jar + +WORKDIR /opt/kafka-${KAFKA_VERSION} + +ENV JAVA_MAJOR_VERSION=11 + +RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh + +COPY entrypoint.sh / + +USER 65534:65534 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/IBM/sarama/LICENSE.md similarity index 95% rename from vendor/github.com/Shopify/sarama/LICENSE rename to 
vendor/github.com/IBM/sarama/LICENSE.md index d2bf4352f4..f8f64d4173 100644 --- a/vendor/github.com/Shopify/sarama/LICENSE +++ b/vendor/github.com/IBM/sarama/LICENSE.md @@ -1,5 +1,9 @@ +# MIT License + Copyright (c) 2013 Shopify +Copyright (c) 2023 IBM Corporation + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile similarity index 100% rename from vendor/github.com/Shopify/sarama/Makefile rename to vendor/github.com/IBM/sarama/Makefile diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/IBM/sarama/README.md similarity index 63% rename from vendor/github.com/Shopify/sarama/README.md rename to vendor/github.com/IBM/sarama/README.md index 0ee6e6a7f6..4534d7b41d 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/IBM/sarama/README.md @@ -1,18 +1,19 @@ # sarama -[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama) -[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/main/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) +[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/IBM/sarama/badge?style=flat)](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996/badge)](https://www.bestpractices.dev/projects/7996) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/). ## Getting started -- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama). +- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). +You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions). ## Compatibility and API stability @@ -20,14 +21,15 @@ Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. However, older releases of Kafka are still likely to work. -Sarama follows semantic versioning and provides API stability via the gopkg.in service. -You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. +Sarama follows semantic versioning and provides API stability via the standard Go +[module version numbering](https://go.dev/doc/modules/version-numbers) scheme. + A changelog is available [here](CHANGELOG.md). ## Contributing -- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/main/.github/CONTRIBUTING.md). -- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. 
+- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details. - The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. - For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. - If you have any questions, just ask! diff --git a/vendor/github.com/IBM/sarama/SECURITY.md b/vendor/github.com/IBM/sarama/SECURITY.md new file mode 100644 index 0000000000..b2f6e61fe7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/SECURITY.md @@ -0,0 +1,11 @@ +# Security + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new). + +See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions. + +Alternatively, you can report them via e-mail or anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages. diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile similarity index 100% rename from vendor/github.com/Shopify/sarama/Vagrantfile rename to vendor/github.com/IBM/sarama/Vagrantfile diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_bindings.go rename to vendor/github.com/IBM/sarama/acl_bindings.go diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/acl_create_request.go rename to vendor/github.com/IBM/sarama/acl_create_request.go index 449102f74a..e581c984a9 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_request.go +++ b/vendor/github.com/IBM/sarama/acl_create_request.go @@ -51,6 +51,10 @@ func (c *CreateAclsRequest) headerVersion() int16 { return 1 } +func (c *CreateAclsRequest) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 1 +} + func (c *CreateAclsRequest) requiredVersion() KafkaVersion { switch c.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/acl_create_response.go rename to vendor/github.com/IBM/sarama/acl_create_response.go index 21d6c340cc..d123ba8631 100644 --- a/vendor/github.com/Shopify/sarama/acl_create_response.go +++ b/vendor/github.com/IBM/sarama/acl_create_response.go @@ -4,6 +4,7 @@ import "time" // CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { + Version int16 ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse } @@ -52,15 +53,28 @@ func (c *CreateAclsResponse) key() int16 { } func (c *CreateAclsResponse) version() int16 { - return 0 + return c.Version } func (c *CreateAclsResponse) headerVersion() int16 { return 
0 } +func (c *CreateAclsResponse) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 1 +} + func (c *CreateAclsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch c.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *CreateAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime } // AclCreationResponse is an acl creation response type diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/acl_delete_request.go rename to vendor/github.com/IBM/sarama/acl_delete_request.go index 5e5c03bc2d..abeb4425e7 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_request.go +++ b/vendor/github.com/IBM/sarama/acl_delete_request.go @@ -52,6 +52,10 @@ func (d *DeleteAclsRequest) headerVersion() int16 { return 1 } +func (d *DeleteAclsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/acl_delete_response.go rename to vendor/github.com/IBM/sarama/acl_delete_response.go index cd33749d5e..2e2850b32a 100644 --- a/vendor/github.com/Shopify/sarama/acl_delete_response.go +++ b/vendor/github.com/IBM/sarama/acl_delete_response.go @@ -60,8 +60,21 @@ func (d *DeleteAclsResponse) headerVersion() int16 { return 0 } +func (d *DeleteAclsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *DeleteAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime } // FilterResponse is a filter response type diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go similarity index 82% rename from vendor/github.com/Shopify/sarama/acl_describe_request.go rename to vendor/github.com/IBM/sarama/acl_describe_request.go index e0fe9023a2..7d65bef14b 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_request.go +++ b/vendor/github.com/IBM/sarama/acl_describe_request.go @@ -1,6 +1,6 @@ package sarama -// DescribeAclsRequest is a secribe acl request type +// DescribeAclsRequest is a describe acl request type type DescribeAclsRequest struct { Version int AclFilter @@ -29,6 +29,10 @@ func (d *DescribeAclsRequest) headerVersion() int16 { return 1 } +func (d *DescribeAclsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go similarity index 90% rename from vendor/github.com/Shopify/sarama/acl_describe_response.go rename to vendor/github.com/IBM/sarama/acl_describe_response.go index 3255fd4857..f89a53b662 100644 --- a/vendor/github.com/Shopify/sarama/acl_describe_response.go +++ b/vendor/github.com/IBM/sarama/acl_describe_response.go @@ -81,6 +81,10 @@ func (d *DescribeAclsResponse) headerVersion() int16 { return 0 } +func (d *DescribeAclsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version 
<= 1 +} + func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: @@ -89,3 +93,7 @@ func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } } + +func (r *DescribeAclsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/acl_filter.go rename to vendor/github.com/IBM/sarama/acl_filter.go diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go similarity index 93% rename from vendor/github.com/Shopify/sarama/acl_types.go rename to vendor/github.com/IBM/sarama/acl_types.go index c3ba8ddcf6..62bb5342ae 100644 --- a/vendor/github.com/Shopify/sarama/acl_types.go +++ b/vendor/github.com/IBM/sarama/acl_types.go @@ -60,7 +60,7 @@ func (a *AclOperation) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the operation and converts it to an AclOperation +// UnmarshalText takes a text representation of the operation and converts it to an AclOperation func (a *AclOperation) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclOperation{ @@ -114,7 +114,7 @@ func (a *AclPermissionType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the permission type and converts it to an AclPermissionType +// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType func (a *AclPermissionType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclPermissionType{ @@ -166,7 +166,7 @@ func (a *AclResourceType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource type and converts it to an AclResourceType +// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType func (a *AclResourceType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourceType{ @@ -217,7 +217,7 @@ func (a *AclResourcePatternType) MarshalText() ([]byte, error) { return []byte(a.String()), nil } -// UnmarshalText takes a text reprentation of the resource pattern type and converts it to an AclResourcePatternType +// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType func (a *AclResourcePatternType) UnmarshalText(text []byte) error { normalized := strings.ToLower(string(text)) mapping := map[string]AclResourcePatternType{ diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go similarity index 80% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go index a96af93417..6d3df9bedc 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go @@ -2,6 +2,7 @@ package sarama // AddOffsetsToTxnRequest adds offsets to a transaction request type AddOffsetsToTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 @@ -45,13 +46,26 @@ func (a *AddOffsetsToTxnRequest) key() int16 { } func (a 
*AddOffsetsToTxnRequest) version() int16 { - return 0 + return a.Version } func (a *AddOffsetsToTxnRequest) headerVersion() int16 { return 1 } +func (a *AddOffsetsToTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_7_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go similarity index 72% rename from vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go index bb61973d16..136460508a 100644 --- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go +++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go @@ -6,6 +6,7 @@ import ( // AddOffsetsToTxnResponse is a response type for adding offsets to txns type AddOffsetsToTxnResponse struct { + Version int16 ThrottleTime time.Duration Err KError } @@ -37,13 +38,30 @@ func (a *AddOffsetsToTxnResponse) key() int16 { } func (a *AddOffsetsToTxnResponse) version() int16 { - return 0 + return a.Version } func (a *AddOffsetsToTxnResponse) headerVersion() int16 { return 0 } +func (a *AddOffsetsToTxnResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_7_0_0 + } +} + +func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go similarity index 83% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go index 57ecf64884..3e2c63c64e 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go +++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go @@ -1,7 +1,8 @@ package sarama -// AddPartitionsToTxnRequest is a add paartition request +// AddPartitionsToTxnRequest is a add partition request type AddPartitionsToTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 @@ -69,13 +70,24 @@ func (a *AddPartitionsToTxnRequest) key() int16 { } func (a *AddPartitionsToTxnRequest) version() int16 { - return 0 + return a.Version } func (a *AddPartitionsToTxnRequest) headerVersion() int16 { return 1 } +func (a *AddPartitionsToTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go similarity index 85% rename from vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go rename to vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go index 0989565076..8ef0a2a2c4 100644 --- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go +++ 
b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go @@ -6,6 +6,7 @@ import ( // AddPartitionsToTxnResponse is a partition errors to transaction type type AddPartitionsToTxnResponse struct { + Version int16 ThrottleTime time.Duration Errors map[string][]*PartitionError } @@ -34,6 +35,7 @@ func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { } func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + a.Version = version throttleTime, err := pd.getInt32() if err != nil { return err @@ -76,15 +78,30 @@ func (a *AddPartitionsToTxnResponse) key() int16 { } func (a *AddPartitionsToTxnResponse) version() int16 { - return 0 + return a.Version } func (a *AddPartitionsToTxnResponse) headerVersion() int16 { return 0 } +func (a *AddPartitionsToTxnResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } // PartitionError is a partition error type diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go similarity index 88% rename from vendor/github.com/Shopify/sarama/admin.go rename to vendor/github.com/IBM/sarama/admin.go index f06198a652..dcf1d7659c 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -196,9 +196,9 @@ func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } -// isErrNoController returns `true` if the given error type unwraps to an +// isErrNotController returns `true` if the given error type unwraps to an // `ErrNotController` response from Kafka -func isErrNoController(err error) bool { +func isErrNotController(err error) bool { return errors.Is(err, ErrNotController) } @@ -207,19 +207,17 @@ func isErrNoController(err error) bool { // provided retryable func) up to the maximum number of tries permitted by // the admin client configuration func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error { - var err error - for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { - err = fn() - if err == nil || !retryable(err) { + for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; { + err := fn() + attemptsRemaining-- + if err == nil || attemptsRemaining <= 0 || !retryable(err) { return err } Logger.Printf( "admin/request retrying after %dms... 
(%d attempts remaining)\n", - ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining) time.Sleep(ca.conf.Admin.Retry.Backoff) - continue } - return err } func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { @@ -240,14 +238,18 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO Timeout: ca.conf.Admin.Timeout, } - if ca.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 1 - } - if ca.conf.Version.IsAtLeast(V1_0_0_0) { + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 3 is the same as version 2 (brokers response before throttling) + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 2 is the same as version 1 (response has ThrottleTime) request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_10_2_0) { + // Version 1 adds validateOnly. + request.Version = 1 } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -275,23 +277,19 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, err - } - - request := &MetadataRequest{ - Topics: topics, - AllowAutoTopicCreation: false, - } - - if ca.conf.Version.IsAtLeast(V1_0_0_0) { - request.Version = 5 - } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 4 - } - - response, err := controller.GetMetadata(request) + var response *MetadataResponse + err = ca.retryOnError(isErrNotController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } + request := NewMetadataRequest(ca.conf.Version, topics) + response, err = controller.GetMetadata(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, err } @@ -299,20 +297,20 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada } func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { - controller, err := ca.Controller() - if err != nil { - return nil, int32(0), err - } - - request := &MetadataRequest{ - Topics: []string{}, - } - - if ca.conf.Version.IsAtLeast(V0_10_0_0) { - request.Version = 1 - } + var response *MetadataResponse + err = ca.retryOnError(isErrNotController, func() error { + controller, err := ca.Controller() + if err != nil { + return err + } - response, err := controller.GetMetadata(request) + request := NewMetadataRequest(ca.conf.Version, nil) + response, err = controller.GetMetadata(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err != nil { return nil, int32(0), err } @@ -352,7 +350,7 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { } _ = b.Open(ca.client.Config()) - metadataReq := &MetadataRequest{} + metadataReq := NewMetadataRequest(ca.conf.Version, nil) metadataResp, err := b.GetMetadata(metadataReq) if err != nil { return nil, err @@ -406,6 +404,7 @@ func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { topicDetails.ConfigEntries = make(map[string]*string) for _, entry := range resource.Configs { + entry := entry // only include non-default non-sensitive config // (don't actually think topic 
config will ever be sensitive) if entry.Default || entry.Sensitive { @@ -430,11 +429,16 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { Timeout: ca.conf.Admin.Timeout, } - if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Versions 0, 1, 2, and 3 are the same. + if ca.conf.Version.IsAtLeast(V2_1_0_0) { + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -474,8 +478,11 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ Timeout: ca.conf.Admin.Timeout, ValidateOnly: validateOnly, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -516,7 +523,7 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ request.AddBlock(topic, int32(i), assignment[i]) } - return ca.retryOnError(isErrNoController, func() error { + return ca.retryOnError(isErrNotController, func() error { b, err := ca.Controller() if err != nil { return err @@ -562,13 +569,20 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) - b, err := ca.Controller() - if err != nil { - return nil, err - } - _ = b.Open(ca.client.Config()) + var rsp *ListPartitionReassignmentsResponse + err = ca.retryOnError(isErrNotController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) - rsp, err := b.ListPartitionReassignments(request) + rsp, err = b.ListPartitionReassignments(request) + if isErrNotController(err) { + _, _ = ca.refreshController() + } + return err + }) if err == nil && rsp != nil { return rsp.TopicStatus, nil @@ -604,6 +618,9 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i Topics: topics, Timeout: ca.conf.Admin.Timeout, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } rsp, err := broker.DeleteRecords(request) if err != nil { errs = append(errs, err) @@ -683,11 +700,8 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, for _, rspResource := range rsp.Resources { if rspResource.Name == resource.Name { - if rspResource.ErrorMsg != "" { - return nil, errors.New(rspResource.ErrorMsg) - } if rspResource.ErrorCode != 0 { - return nil, KError(rspResource.ErrorCode) + return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg} } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) @@ -709,6 +723,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string Resources: resources, ValidateOnly: validateOnly, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } var ( b *Broker @@ -738,11 +755,8 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string for _, rspResource := range rsp.Resources { if rspResource.Name == name { - if rspResource.ErrorMsg != "" { - return errors.New(rspResource.ErrorMsg) - } if rspResource.ErrorCode != 0 { - return KError(rspResource.ErrorCode) + return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: 
rspResource.ErrorMsg} } } } @@ -908,8 +922,19 @@ func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*Group describeReq := &DescribeGroupsRequest{ Groups: brokerGroups, } - if ca.conf.Version.IsAtLeast(V2_3_0_0) { + + if ca.conf.Version.IsAtLeast(V2_4_0_0) { + // Starting in version 4, the response will include group.instance.id info for members. describeReq.Version = 4 + } else if ca.conf.Version.IsAtLeast(V2_3_0_0) { + // Starting in version 3, authorized operations can be requested. + describeReq.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 2 is the same as version 0. + describeReq.Version = 2 + } else if ca.conf.Version.IsAtLeast(V1_1_0_0) { + // Version 1 is the same as version 0. + describeReq.Version = 1 } response, err := broker.DescribeGroups(describeReq) if err != nil { @@ -936,7 +961,22 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened - response, err := b.ListGroups(&ListGroupsRequest{}) + request := &ListGroupsRequest{} + if ca.conf.Version.IsAtLeast(V2_6_0_0) { + // Version 4 adds the StatesFilter field (KIP-518). + request.Version = 4 + } else if ca.conf.Version.IsAtLeast(V2_4_0_0) { + // Version 3 is the first flexible version. + request.Version = 3 + } else if ca.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 2 is the same as version 0. + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + + response, err := b.ListGroups(request) if err != nil { errChan <- err return @@ -972,16 +1012,7 @@ func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions m return nil, err } - request := &OffsetFetchRequest{ - ConsumerGroup: group, - partitions: topicPartitions, - } - - if ca.conf.Version.IsAtLeast(V0_10_2_0) { - request.Version = 2 - } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { - request.Version = 1 - } + request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions) return coordinator.FetchOffset(request) } @@ -1023,6 +1054,9 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { request := &DeleteGroupsRequest{ Groups: []string{group}, } + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } resp, err := coordinator.DeleteGroups(request) if err != nil { @@ -1060,7 +1094,11 @@ func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32 defer wg.Done() _ = b.Open(conf) // Ensure that broker is opened - response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + request := &DescribeLogDirsRequest{} + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + response, err := b.DescribeLogDirs(request) if err != nil { errChan <- err return @@ -1131,12 +1169,16 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU Upsertions: u, } - b, err := ca.Controller() - if err != nil { - return nil, err - } + var rsp *AlterUserScramCredentialsResponse + err := ca.retryOnError(isErrNotController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } - rsp, err := b.AlterUserScramCredentials(req) + rsp, err = b.AlterUserScramCredentials(req) + return err + }) if err != nil { return nil, err } @@ -1207,6 +1249,10 @@ func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op Clie } func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) 
(*LeaveGroupResponse, error) { + if !ca.conf.Version.IsAtLeast(V2_4_0_0) { + return nil, ConfigurationError("Removing members from a consumer group headers requires Kafka version of at least v2.4.0") + } + controller, err := ca.client.Coordinator(groupId) if err != nil { return nil, err diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go similarity index 97% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_request.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_request.go index f528512d02..a7fa0cbd13 100644 --- a/vendor/github.com/Shopify/sarama/alter_client_quotas_request.go +++ b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go @@ -12,6 +12,7 @@ package sarama // validate_only => BOOLEAN type AlterClientQuotasRequest struct { + Version int16 Entries []AlterClientQuotasEntry // The quota configuration entries to alter. ValidateOnly bool // Whether the alteration should be validated, but not performed. } @@ -182,13 +183,17 @@ func (a *AlterClientQuotasRequest) key() int16 { } func (a *AlterClientQuotasRequest) version() int16 { - return 0 + return a.Version } func (a *AlterClientQuotasRequest) headerVersion() int16 { return 1 } +func (a *AlterClientQuotasRequest) isValidVersion() bool { + return a.Version == 0 +} + func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/alter_client_quotas_response.go rename to vendor/github.com/IBM/sarama/alter_client_quotas_response.go index ccd27d5f5e..cce997cae2 100644 --- a/vendor/github.com/Shopify/sarama/alter_client_quotas_response.go +++ b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go @@ -14,6 +14,7 @@ import ( // entity_name => NULLABLE_STRING type AlterClientQuotasResponse struct { + Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered. 
} @@ -133,13 +134,21 @@ func (a *AlterClientQuotasResponse) key() int16 { } func (a *AlterClientQuotasResponse) version() int16 { - return 0 + return a.Version } func (a *AlterClientQuotasResponse) headerVersion() int16 { return 0 } +func (a *AlterClientQuotasResponse) isValidVersion() bool { + return a.Version == 0 +} + func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } + +func (r *AlterClientQuotasResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/alter_configs_request.go rename to vendor/github.com/IBM/sarama/alter_configs_request.go index 8b94b1f3fe..ee1ab64458 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_request.go +++ b/vendor/github.com/IBM/sarama/alter_configs_request.go @@ -2,6 +2,7 @@ package sarama // AlterConfigsRequest is an alter config request type type AlterConfigsRequest struct { + Version int16 Resources []*AlterConfigsResource ValidateOnly bool } @@ -114,13 +115,24 @@ func (a *AlterConfigsRequest) key() int16 { } func (a *AlterConfigsRequest) version() int16 { - return 0 + return a.Version } func (a *AlterConfigsRequest) headerVersion() int16 { return 1 } +func (a *AlterConfigsRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 1 +} + func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_0_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go similarity index 78% rename from vendor/github.com/Shopify/sarama/alter_configs_response.go rename to vendor/github.com/IBM/sarama/alter_configs_response.go index 84cd86c729..d8b70e3718 100644 --- a/vendor/github.com/Shopify/sarama/alter_configs_response.go +++ b/vendor/github.com/IBM/sarama/alter_configs_response.go @@ -1,13 +1,30 @@ package sarama -import "time" +import ( + "fmt" + "time" +) // AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { + Version int16 ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } +type AlterConfigError struct { + Err KError + ErrMsg string +} + +func (c *AlterConfigError) Error() string { + text := c.Err.Error() + if c.ErrMsg != "" { + text = fmt.Sprintf("%s - %s", text, c.ErrMsg) + } + return text +} + // AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 @@ -100,17 +117,32 @@ func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) e } func (a *AlterConfigsResponse) key() int16 { - return 32 + return 33 } func (a *AlterConfigsResponse) version() int16 { - return 0 + return a.Version } func (a *AlterConfigsResponse) headerVersion() int16 { return 0 } +func (a *AlterConfigsResponse) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 1 +} + func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 1: + return V2_0_0_0 + case 0: + return V0_11_0_0 + default: + return V2_0_0_0 + } +} + +func (r *AlterConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go 
b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go index f0a2f9dd59..f898f87a20 100644 --- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go +++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go @@ -113,6 +113,10 @@ func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { return 2 } +func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go similarity index 93% rename from vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go index b3f9a15fe7..1ee56b40ee 100644 --- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go +++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type alterPartitionReassignmentsErrorBlock struct { errorCode KError errorMessage *string @@ -152,6 +154,14 @@ func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { return 1 } +func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go similarity index 97% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go index 0530d8946a..f29f164cff 100644 --- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go +++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go @@ -137,6 +137,10 @@ func (r *AlterUserScramCredentialsRequest) headerVersion() int16 { return 2 } +func (r *AlterUserScramCredentialsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go index 31e167b5eb..75eac0cec1 100644 --- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go +++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go @@ -89,6 +89,14 @@ func (r *AlterUserScramCredentialsResponse) headerVersion() int16 { return 2 } +func (r *AlterUserScramCredentialsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } + +func (r 
*AlterUserScramCredentialsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go similarity index 89% rename from vendor/github.com/Shopify/sarama/api_versions_request.go rename to vendor/github.com/IBM/sarama/api_versions_request.go index e5b3baf646..f94174daf2 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/IBM/sarama/api_versions_request.go @@ -57,13 +57,21 @@ func (r *ApiVersionsRequest) headerVersion() int16 { return 1 } +func (r *ApiVersionsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return V0_10_0_0 case 3: return V2_4_0_0 - default: + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: return V0_10_0_0 + default: + return V2_4_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/api_versions_response.go rename to vendor/github.com/IBM/sarama/api_versions_response.go index ade911c597..457c79a95b 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/IBM/sarama/api_versions_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + // ApiVersionsResponseKey contains the APIs supported by the broker. type ApiVersionsResponseKey struct { // Version defines the protocol version to use for encode and decode @@ -144,13 +146,25 @@ func (r *ApiVersionsResponse) headerVersion() int16 { return 0 } +func (r *ApiVersionsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return V0_10_0_0 case 3: return V2_4_0_0 - default: + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: return V0_10_0_0 + default: + return V2_4_0_0 } } + +func (r *ApiVersionsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go similarity index 97% rename from vendor/github.com/Shopify/sarama/async_producer.go rename to vendor/github.com/IBM/sarama/async_producer.go index 5c23ac7751..f629a6a2e7 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -20,7 +20,6 @@ import ( // leaks and message lost: it will not be garbage-collected automatically when it passes // out of scope and buffered messages may not be flushed. type AsyncProducer interface { - // AsyncClose triggers a shutdown of the producer. The shutdown has completed // when both the Errors and Successes channels have been closed. When calling // AsyncClose, you *must* continue to read from those channels in order to @@ -50,7 +49,7 @@ type AsyncProducer interface { // errors to be returned. Errors() <-chan *ProducerError - // IsTransactional return true when current producer is is transactional. + // IsTransactional return true when current producer is transactional. IsTransactional() bool // TxnStatus return current producer transaction status. 
@@ -366,17 +365,17 @@ func (p *asyncProducer) Close() error { }) } - var errors ProducerErrors + var pErrs ProducerErrors if p.conf.Producer.Return.Errors { for event := range p.errors { - errors = append(errors, event) + pErrs = append(pErrs, event) } } else { <-p.errors } - if len(errors) > 0 { - return errors + if len(pErrs) > 0 { + return pErrs } return nil } @@ -450,8 +449,10 @@ func (p *asyncProducer) dispatcher() { p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) continue } - if msg.ByteSize(version) > p.conf.Producer.MaxMessageBytes { - p.returnError(msg, ErrMessageSizeTooLarge) + + size := msg.ByteSize(version) + if size > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes))) continue } @@ -613,6 +614,18 @@ func (pp *partitionProducer) backoff(retries int) { } } +func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error { + if pp.brokerProducer == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + pp.backoff(msg.retries) + return err + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + return nil +} + func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message @@ -644,6 +657,9 @@ func (pp *partitionProducer) dispatch() { } if msg.retries > pp.highWatermark { + if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { + continue + } // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) pp.backoff(msg.retries) @@ -670,14 +686,8 @@ func (pp *partitionProducer) dispatch() { // if we made it this far then the current msg contains real data, and can be sent to the next goroutine // without breaking any of our ordering guarantees - - if pp.brokerProducer == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnError(msg, err) - pp.backoff(msg.retries) - continue - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil { + continue } // Now that we know we have a broker to actually try and send this message to, generate the sequence @@ -1152,7 +1162,7 @@ func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitio produceSet.bufferCount += len(pSet.msgs) for _, msg := range pSet.msgs { if msg.retries >= p.conf.Producer.Retry.Max { - p.returnError(msg, kerr) + p.returnErrors(pSet.msgs, kerr) return } msg.retries++ diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go similarity index 94% rename from vendor/github.com/Shopify/sarama/balance_strategy.go rename to vendor/github.com/IBM/sarama/balance_strategy.go index 4594df6f6d..30d41779c1 100644 --- a/vendor/github.com/Shopify/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -57,35 +57,42 @@ type BalanceStrategy interface { // -------------------------------------------------------------------- -// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. 
+// NewBalanceStrategyRange returns a range balance strategy, +// which is the default and assigns partitions as ranges to consumer group members. // This follows the same logic as // https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html // // Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2): // // M1: {T1: [0, 1, 2], T2: [0, 1, 2]} -// M2: {T2: [3, 4, 5], T2: [3, 4, 5]} -var BalanceStrategyRange = &balanceStrategy{ - name: RangeBalanceStrategyName, - coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { - partitionsPerConsumer := len(partitions) / len(memberIDs) - consumersWithExtraPartition := len(partitions) % len(memberIDs) - - sort.Strings(memberIDs) - - for i, memberID := range memberIDs { - min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) - extra := 0 - if i < consumersWithExtraPartition { - extra = 1 +// M2: {T1: [3, 4, 5], T2: [3, 4, 5]} +func NewBalanceStrategyRange() BalanceStrategy { + return &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + partitionsPerConsumer := len(partitions) / len(memberIDs) + consumersWithExtraPartition := len(partitions) % len(memberIDs) + + sort.Strings(memberIDs) + + for i, memberID := range memberIDs { + min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i))) + extra := 0 + if i < consumersWithExtraPartition { + extra = 1 + } + max := min + partitionsPerConsumer + extra + plan.Add(memberID, topic, partitions[min:max]...) } - max := min + partitionsPerConsumer + extra - plan.Add(memberID, topic, partitions[min:max]...) - } - }, + }, + } } -// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// Deprecated: use NewBalanceStrategyRange to avoid data race issue +var BalanceStrategyRange = NewBalanceStrategyRange() + +// NewBalanceStrategySticky returns a sticky balance strategy, +// which assigns partitions to members with an attempt to preserve earlier assignments // while maintain a balanced partition distribution. // Example with topic T with six partitions (0..5) and two members (M1, M2): // @@ -97,13 +104,18 @@ var BalanceStrategyRange = &balanceStrategy{ // M1: {T: [0, 2]} // M2: {T: [1, 3]} // M3: {T: [4, 5]} -var BalanceStrategySticky = &stickyBalanceStrategy{} +func NewBalanceStrategySticky() BalanceStrategy { + return &stickyBalanceStrategy{} +} + +// Deprecated: use NewBalanceStrategySticky to avoid data race issue +var BalanceStrategySticky = NewBalanceStrategySticky() // -------------------------------------------------------------------- type balanceStrategy struct { - name string coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) + name string } // Name implements BalanceStrategy. 
@@ -171,10 +183,7 @@ func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetad } // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state - isFreshAssignment := false - if len(currentAssignment) == 0 { - isFreshAssignment = true - } + isFreshAssignment := len(currentAssignment) == 0 // create a mapping of all current topic partitions and the consumers that can be assigned to them partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) @@ -281,10 +290,7 @@ func strsContains(s []string, value string) bool { // Balance assignments across consumers for maximum fairness and stickiness. func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { - initializing := false - if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { - initializing = true - } + initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 // assign all unassigned partitions for _, partition := range unassignedPartitions { @@ -337,11 +343,17 @@ func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPart } } -// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// NewBalanceStrategyRoundRobin returns a round-robin balance strategy, +// which assigns partitions to members in alternating order. 
// For example, there are two topics (t0, t1) and two consumer (m0, m1), and each topic has three partitions (p0, p1, p2): // M0: [t0p0, t0p2, t1p1] // M1: [t0p1, t1p0, t1p2] -var BalanceStrategyRoundRobin = new(roundRobinBalancer) +func NewBalanceStrategyRoundRobin() BalanceStrategy { + return new(roundRobinBalancer) +} + +// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue +var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin() type roundRobinBalancer struct{} @@ -414,8 +426,8 @@ func (tp *topicAndPartition) comparedValue() string { } type memberAndTopic struct { - memberID string topics map[string]struct{} + memberID string } func (m *memberAndTopic) hasTopic(topic string) bool { @@ -681,11 +693,8 @@ func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, par } heap.Init(&pq) - for { - // loop until no consumer-group members remain - if pq.Len() == 0 { - break - } + // loop until no consumer-group members remain + for pq.Len() != 0 { member := pq[0] // partitions that were assigned to a different consumer last time @@ -995,20 +1004,21 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur } for _, pair := range pairs { - if pair.SrcMemberID == src { - // create a deep copy of the pairs, excluding the current pair - reducedSet := make([]consumerPair, len(pairs)-1) - i := 0 - for _, p := range pairs { - if p != pair { - reducedSet[i] = pair - i++ - } + if pair.SrcMemberID != src { + continue + } + // create a deep copy of the pairs, excluding the current pair + reducedSet := make([]consumerPair, len(pairs)-1) + i := 0 + for _, p := range pairs { + if p != pair { + reducedSet[i] = pair + i++ } - - currentPath = append(currentPath, pair.SrcMemberID) - return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) } + + currentPath = append(currentPath, pair.SrcMemberID) + return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) } return currentPath, false } @@ -1106,9 +1116,9 @@ type assignmentPriorityQueue []*consumerGroupMember func (pq assignmentPriorityQueue) Len() int { return len(pq) } func (pq assignmentPriorityQueue) Less(i, j int) bool { - // order asssignment priority queue in descending order using assignment-count/member-id + // order assignment priority queue in descending order using assignment-count/member-id if len(pq[i].assignments) == len(pq[j].assignments) { - return strings.Compare(pq[i].id, pq[j].id) > 0 + return pq[i].id > pq[j].id } return len(pq[i].assignments) > len(pq[j].assignments) } diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go similarity index 90% rename from vendor/github.com/Shopify/sarama/broker.go rename to vendor/github.com/IBM/sarama/broker.go index 5815fd5679..268696cf46 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -33,29 +33,33 @@ type Broker struct { responses chan *responsePromise done chan bool - metricRegistry metrics.Registry - incomingByteRate metrics.Meter - requestRate metrics.Meter - fetchRate metrics.Meter - requestSize metrics.Histogram - requestLatency metrics.Histogram - outgoingByteRate metrics.Meter - responseRate metrics.Meter - responseSize metrics.Histogram - requestsInFlight metrics.Counter - brokerIncomingByteRate metrics.Meter - brokerRequestRate metrics.Meter - brokerFetchRate metrics.Meter - brokerRequestSize metrics.Histogram - brokerRequestLatency metrics.Histogram - brokerOutgoingByteRate metrics.Meter - brokerResponseRate 
metrics.Meter - brokerResponseSize metrics.Histogram - brokerRequestsInFlight metrics.Counter - brokerThrottleTime metrics.Histogram + metricRegistry metrics.Registry + incomingByteRate metrics.Meter + requestRate metrics.Meter + fetchRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + requestsInFlight metrics.Counter + protocolRequestsRate map[int16]metrics.Meter + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerFetchRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter + brokerThrottleTime metrics.Histogram + brokerProtocolRequestsRate map[int16]metrics.Meter kerberosAuthenticator GSSAPIKerberosAuth clientSessionReauthenticationTimeMs int64 + + throttleTimer *time.Timer } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -173,7 +177,9 @@ func (b *Broker) Open(conf *Config) error { b.lock.Lock() - b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + if b.metricRegistry == nil { + b.metricRegistry = newCleanupRegistry(conf.MetricRegistry) + } go withRecover(func() { defer func() { @@ -218,6 +224,7 @@ func (b *Broker) Open(conf *Config) error { b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry) b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry) b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry) + b.protocolRequestsRate = map[int16]metrics.Meter{} // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 && !metrics.UseNilMetrics { @@ -253,6 +260,7 @@ func (b *Broker) Open(conf *Config) error { b.connErr = b.authenticateViaSASLv1() if b.connErr != nil { close(b.responses) + <-b.done err = b.conn.Close() if err == nil { DebugLogger.Printf("Closed connection to broker %s\n", b.addr) @@ -364,6 +372,7 @@ func (b *Broker) Rack() string { // GetMetadata send a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) + response.Version = request.Version // Required to ensure use of the correct response header version err := b.sendAndReceive(request, response) if err != nil { @@ -426,11 +435,16 @@ type ProduceCallback func(*ProduceResponse, error) // // Make sure not to Close the broker in the callback as it will lead to a deadlock. 
func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error { + b.lock.Lock() + defer b.lock.Unlock() + needAcks := request.RequiredAcks != NoResponse // Use a nil promise when no acks is required var promise *responsePromise if needAcks { + metricRegistry := b.metricRegistry + // Create ProduceResponse early to provide the header version res := new(ProduceResponse) promise = &responsePromise{ @@ -443,14 +457,14 @@ func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error return } - if err := versionedDecode(packets, res, request.version(), b.metricRegistry); err != nil { + if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil { // Malformed response cb(nil, err) return } - // Wellformed response - b.updateThrottleMetric(res.ThrottleTime) + // Well-formed response + b.handleThrottledResponse(res) cb(res, nil) }, } @@ -471,7 +485,6 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { } else { response = new(ProduceResponse) err = b.sendAndReceive(request, response) - b.updateThrottleMetric(response.ThrottleTime) } if err != nil { @@ -578,6 +591,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error // ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) + response.Version = request.Version // Required to ensure use of the correct response header version err := b.sendAndReceive(request, response) if err != nil { @@ -936,6 +950,7 @@ func (b *Broker) write(buf []byte) (n int, err error) { return b.conn.Write(buf) } +// b.lock must be held by caller func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { var promise *responsePromise if promiseResponse { @@ -960,10 +975,8 @@ func makeResponsePromise(responseHeaderVersion int16) *responsePromise { return promise } +// b.lock must be held by caller func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error { - b.lock.Lock() - defer b.lock.Unlock() - if b.conn == nil { if b.connErr != nil { return b.connErr @@ -993,11 +1006,15 @@ func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error { return err } + // check and wait if throttled + b.waitIfThrottled() + requestTime := time.Now() // Will be decremented in responseReceiver (except error or request with NoResponse) b.addRequestInFlightMetrics(1) bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) + b.updateProtocolMetrics(rb) if err != nil { b.addRequestInFlightMetrics(-1) return err @@ -1018,6 +1035,8 @@ func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error { } func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + b.lock.Lock() + defer b.lock.Unlock() responseHeaderVersion := int16(-1) if res != nil { responseHeaderVersion = res.headerVersion() @@ -1032,13 +1051,20 @@ func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { return nil } - return b.handleResponsePromise(req, res, promise) + err = handleResponsePromise(req, res, promise, b.metricRegistry) + if err != nil { + return err + } + if res != nil { + b.handleThrottledResponse(res) + } + return nil } -func (b *Broker) handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise) error { +func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, 
metricRegistry metrics.Registry) error { select { case buf := <-promise.packets: - return versionedDecode(buf, res, req.version(), b.metricRegistry) + return versionedDecode(buf, res, req.version(), metricRegistry) case err := <-promise.errors: return err } @@ -1050,7 +1076,12 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } - host, err := pd.getString() + var host string + if version < 9 { + host, err = pd.getString() + } else { + host, err = pd.getCompactString() + } if err != nil { return err } @@ -1060,11 +1091,13 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } - if version >= 1 { + if version >= 1 && version < 9 { b.rack, err = pd.getNullableString() - if err != nil { - return err - } + } else if version >= 9 { + b.rack, err = pd.getCompactNullableString() + } + if err != nil { + return err } b.addr = net.JoinHostPort(host, fmt.Sprint(port)) @@ -1072,6 +1105,13 @@ func (b *Broker) decode(pd packetDecoder, version int16) (err error) { return err } + if version >= 9 { + _, err := pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + return nil } @@ -1088,7 +1128,11 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { pe.putInt32(b.id) - err = pe.putString(host) + if version < 9 { + err = pe.putString(host) + } else { + err = pe.putCompactString(host) + } if err != nil { return err } @@ -1096,12 +1140,20 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { pe.putInt32(int32(port)) if version >= 1 { - err = pe.putNullableString(b.rack) + if version < 9 { + err = pe.putNullableString(b.rack) + } else { + err = pe.putNullableCompactString(b.rack) + } if err != nil { return err } } + if version >= 9 { + pe.putEmptyTaggedFieldArray() + } + return nil } @@ -1181,6 +1233,7 @@ func (b *Broker) authenticateViaSASLv0() error { } func (b *Broker) authenticateViaSASLv1() error { + metricRegistry := b.metricRegistry if b.conf.Net.SASL.Handshake { handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version} handshakeResponse := new(SaslHandshakeResponse) @@ -1191,7 +1244,7 @@ func (b *Broker) authenticateViaSASLv1() error { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } - handshakeErr = b.handleResponsePromise(handshakeRequest, handshakeResponse, prom) + handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr @@ -1211,7 +1264,7 @@ func (b *Broker) authenticateViaSASLv1() error { Logger.Printf("Error while performing SASL Auth %s\n", b.addr) return nil, authErr } - authErr = b.handleResponsePromise(authenticateRequest, authenticateResponse, prom) + authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry) if authErr != nil { Logger.Printf("Error while performing SASL Auth %s\n", b.addr) return nil, authErr @@ -1430,7 +1483,7 @@ func (b *Broker) sendAndReceiveSASLSCRAMv0() error { length := len(msg) authBytes := make([]byte, length+4) // 4 byte length header + auth data binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte(msg)) + copy(authBytes[4:], msg) _, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(length + 4) if err != nil { @@ -1606,15 +1659,65 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { } } -func (b 
*Broker) updateThrottleMetric(throttleTime time.Duration) { - if throttleTime != time.Duration(0) { - DebugLogger.Printf( - "producer/broker/%d ProduceResponse throttled %v\n", - b.ID(), throttleTime) - if b.brokerThrottleTime != nil { - throttleTimeInMs := int64(throttleTime / time.Millisecond) - b.brokerThrottleTime.Update(throttleTimeInMs) +func (b *Broker) updateProtocolMetrics(rb protocolBody) { + protocolRequestsRate := b.protocolRequestsRate[rb.key()] + if protocolRequestsRate == nil { + protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry) + b.protocolRequestsRate[rb.key()] = protocolRequestsRate + } + protocolRequestsRate.Mark(1) + + if b.brokerProtocolRequestsRate != nil { + brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()] + if brokerProtocolRequestsRate == nil { + brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key())) + b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate } + brokerProtocolRequestsRate.Mark(1) + } +} + +type throttleSupport interface { + throttleTime() time.Duration +} + +func (b *Broker) handleThrottledResponse(resp protocolBody) { + throttledResponse, ok := resp.(throttleSupport) + if !ok { + return + } + throttleTime := throttledResponse.throttleTime() + if throttleTime == time.Duration(0) { + return + } + DebugLogger.Printf( + "broker/%d %T throttled %v\n", b.ID(), resp, throttleTime) + b.setThrottle(throttleTime) + b.updateThrottleMetric(throttleTime) +} + +func (b *Broker) setThrottle(throttleTime time.Duration) { + if b.throttleTimer != nil { + // if there is an existing timer stop/clear it + if !b.throttleTimer.Stop() { + <-b.throttleTimer.C + } + } + b.throttleTimer = time.NewTimer(throttleTime) +} + +func (b *Broker) waitIfThrottled() { + if b.throttleTimer != nil { + DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID()) + <-b.throttleTimer.C + b.throttleTimer = nil + } +} + +func (b *Broker) updateThrottleMetric(throttleTime time.Duration) { + if b.brokerThrottleTime != nil { + throttleTimeInMs := int64(throttleTime / time.Millisecond) + b.brokerThrottleTime.Update(throttleTimeInMs) } } @@ -1629,6 +1732,7 @@ func (b *Broker) registerMetrics() { b.brokerResponseSize = b.registerHistogram("response-size") b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms") + b.brokerProtocolRequestsRate = map[int16]metrics.Meter{} } func (b *Broker) registerMeter(name string) metrics.Meter { diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/IBM/sarama/client.go similarity index 85% rename from vendor/github.com/Shopify/sarama/client.go rename to vendor/github.com/IBM/sarama/client.go index b3b7dfbcca..2decba7c55 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -1,13 +1,18 @@ package sarama import ( + "context" "errors" "math" "math/rand" + "net" "sort" + "strings" "sync" "sync/atomic" "time" + + "golang.org/x/net/proxy" ) // Client is a generic Kafka client. It manages connections to one or more Kafka brokers. @@ -50,6 +55,10 @@ type Client interface { // topic/partition, as determined by querying the cluster metadata. Leader(topic string, partitionID int32) (*Broker, error) + // LeaderAndEpoch returns the leader and its epoch for the current + // topic/partition, as determined by querying the cluster metadata. 
+ LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) + // Replicas returns the set of all replica IDs for the given partition. Replicas(topic string, partitionID int32) ([]int32, error) @@ -128,10 +137,10 @@ const ( ) type client struct { - // updateMetaDataMs stores the time at which metadata was lasted updated. + // updateMetadataMs stores the time at which metadata was lasted updated. // Note: this accessed atomically so must be the first word in the struct // as per golang/go#41970 - updateMetaDataMs int64 + updateMetadataMs int64 conf *Config closer, closed chan none // for shutting down background metadata updater @@ -154,7 +163,6 @@ type client struct { cachedPartitionsResults map[string][maxPartitionIndex][]int32 lock sync.RWMutex // protects access to the maps that hold cluster state. - } // NewClient creates a new Client. It connects to one of the given broker addresses @@ -175,6 +183,13 @@ func NewClient(addrs []string, conf *Config) (Client, error) { return nil, ConfigurationError("You must provide at least one broker address") } + if strings.Contains(addrs[0], ".servicebus.windows.net") { + if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) { + Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility") + conf.Version = V1_0_0_0 + } + } + client := &client{ conf: conf, closer: make(chan none), @@ -187,6 +202,14 @@ func NewClient(addrs []string, conf *Config) (Client, error) { transactionCoordinators: make(map[string]int32), } + if conf.Net.ResolveCanonicalBootstrapServers { + var err error + addrs, err = client.resolveCanonicalNames(addrs) + if err != nil { + return nil, err + } + } + client.randomizeSeedBrokers(addrs) if conf.Metadata.Full { @@ -235,12 +258,26 @@ func (client *client) Broker(brokerID int32) (*Broker, error) { } func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + // FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superceded by transaction_manager.go? brokerErrors := make([]error, 0) - for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { - var response *InitProducerIDResponse - req := &InitProducerIDRequest{} + for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { + request := &InitProducerIDRequest{} + + if client.conf.Version.IsAtLeast(V2_7_0_0) { + // Version 4 adds the support for new error code PRODUCER_FENCED. + request.Version = 4 + } else if client.conf.Version.IsAtLeast(V2_5_0_0) { + // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error + request.Version = 3 + } else if client.conf.Version.IsAtLeast(V2_4_0_0) { + // Version 2 is the first flexible version. + request.Version = 2 + } else if client.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. 
+ request.Version = 1 + } - response, err := broker.InitProducerID(req) + response, err := broker.InitProducerID(request) if err == nil { return response, nil } else { @@ -452,21 +489,25 @@ func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + leader, _, err := client.LeaderAndEpoch(topic, partitionID) + return leader, err +} + +func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) { if client.Closed() { - return nil, ErrClosedClient + return nil, -1, ErrClosedClient } - leader, err := client.cachedLeader(topic, partitionID) - + leader, epoch, err := client.cachedLeader(topic, partitionID) if leader == nil { err = client.RefreshMetadata(topic) if err != nil { - return nil, err + return nil, -1, err } - leader, err = client.cachedLeader(topic, partitionID) + leader, epoch, err = client.cachedLeader(topic, partitionID) } - return leader, err + return leader, epoch, err } func (client *client) RefreshBrokers(addrs []string) error { @@ -478,16 +519,16 @@ func (client *client) RefreshBrokers(addrs []string) error { defer client.lock.Unlock() for _, broker := range client.brokers { - _ = broker.Close() - delete(client.brokers, broker.ID()) + safeAsyncClose(broker) } + client.brokers = make(map[int32]*Broker) for _, broker := range client.seedBrokers { - _ = broker.Close() + safeAsyncClose(broker) } for _, broker := range client.deadSeeds { - _ = broker.Close() + safeAsyncClose(broker) } client.seedBrokers = nil @@ -505,7 +546,7 @@ func (client *client) RefreshMetadata(topics ...string) error { // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper // error. This handles the case by returning an error instead of sending it - // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310 for _, topic := range topics { if topic == "" { return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return @@ -519,17 +560,17 @@ func (client *client) RefreshMetadata(topics ...string) error { return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) } -func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { +func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) { if client.Closed() { return -1, ErrClosedClient } - offset, err := client.getOffset(topic, partitionID, time) + offset, err := client.getOffset(topic, partitionID, timestamp) if err != nil { if err := client.RefreshMetadata(topic); err != nil { return -1, err } - return client.getOffset(topic, partitionID, time) + return client.getOffset(topic, partitionID, timestamp) } return offset, err @@ -722,22 +763,21 @@ func (client *client) registerBroker(broker *Broker) { } } -// deregisterBroker removes a broker from the seedsBroker list, and if it's -// not the seedbroker, removes it from brokers map completely. +// deregisterBroker removes a broker from the broker list, and if it's +// not in the broker list, removes it from seedBrokers. 
func (client *client) deregisterBroker(broker *Broker) { client.lock.Lock() defer client.lock.Unlock() + _, ok := client.brokers[broker.ID()] + if ok { + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + return + } if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { client.deadSeeds = append(client.deadSeeds, broker) client.seedBrokers = client.seedBrokers[1:] - } else { - // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, - // but we really shouldn't have to; once that loop is made better this case can be - // removed, and the function generally can be renamed from `deregisterBroker` to - // `nextSeedBroker` or something - DebugLogger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) - delete(client.brokers, broker.ID()) } } @@ -750,33 +790,12 @@ func (client *client) resurrectDeadBrokers() { client.deadSeeds = nil } -func (client *client) anyBroker() *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - - // not guaranteed to be random *or* deterministic - for _, broker := range client.brokers { - _ = broker.Open(client.conf) - return broker - } - - return nil -} - +// LeastLoadedBroker returns the broker with the least pending requests. +// Firstly, choose the broker from cached broker list. If the broker list is empty, choose from seed brokers. func (client *client) LeastLoadedBroker() *Broker { client.lock.RLock() defer client.lock.RUnlock() - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - var leastLoadedBroker *Broker pendingRequests := math.MaxInt for _, broker := range client.brokers { @@ -785,10 +804,16 @@ func (client *client) LeastLoadedBroker() *Broker { leastLoadedBroker = broker } } - if leastLoadedBroker != nil { _ = leastLoadedBroker.Open(client.conf) + return leastLoadedBroker } + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + return leastLoadedBroker } @@ -848,7 +873,7 @@ func (client *client) setPartitionCache(topic string, partitionSet partitionType return ret } -func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) { client.lock.RLock() defer client.lock.RUnlock() @@ -857,31 +882,43 @@ func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, er metadata, ok := partitions[partitionID] if ok { if errors.Is(metadata.Err, ErrLeaderNotAvailable) { - return nil, ErrLeaderNotAvailable + return nil, -1, ErrLeaderNotAvailable } b := client.brokers[metadata.Leader] if b == nil { - return nil, ErrLeaderNotAvailable + return nil, -1, ErrLeaderNotAvailable } _ = b.Open(client.conf) - return b, nil + return b, metadata.LeaderEpoch, nil } } - return nil, ErrUnknownTopicOrPartition + return nil, -1, ErrUnknownTopicOrPartition } -func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { +func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) { broker, err := client.Leader(topic, partitionID) if err != nil { return -1, err } request := &OffsetRequest{} - if client.conf.Version.IsAtLeast(V0_10_1_0) { + if client.conf.Version.IsAtLeast(V2_1_0_0) { + // 
Version 4 adds the current leader epoch, which is used for fencing. + request.Version = 4 + } else if client.conf.Version.IsAtLeast(V2_0_0_0) { + // Version 3 is the same as version 2. + request.Version = 3 + } else if client.conf.Version.IsAtLeast(V0_11_0_0) { + // Version 2 adds the isolation level, which is used for transactional reads. + request.Version = 2 + } else if client.conf.Version.IsAtLeast(V0_10_1_0) { + // Version 1 removes MaxNumOffsets. From this version forward, only a single + // offset can be returned. request.Version = 1 } - request.AddBlock(topic, partitionID, time, 1) + + request.AddBlock(topic, partitionID, timestamp, 1) response, err := broker.GetAvailableOffsets(request) if err != nil { @@ -967,20 +1004,21 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, time.Sleep(backoff) } - t := atomic.LoadInt64(&client.updateMetaDataMs) - if time.Since(time.Unix(t/1e3, 0)) < backoff { + t := atomic.LoadInt64(&client.updateMetadataMs) + if time.Since(time.UnixMilli(t)) < backoff { return err } + attemptsRemaining-- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) - return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) + return client.tryRefreshMetadata(topics, attemptsRemaining, deadline) } return err } - broker := client.anyBroker() + broker := client.LeastLoadedBroker() brokerErrors := make([]error, 0) - for ; broker != nil && !pastDeadline(0); broker = client.anyBroker() { + for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() { allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation if len(topics) > 0 { DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) @@ -989,22 +1027,21 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } - req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} - if client.conf.Version.IsAtLeast(V1_0_0_0) { - req.Version = 5 - } else if client.conf.Version.IsAtLeast(V0_10_0_0) { - req.Version = 1 - } - - t := atomic.LoadInt64(&client.updateMetaDataMs) - if !atomic.CompareAndSwapInt64(&client.updateMetaDataMs, t, time.Now().UnixNano()/int64(time.Millisecond)) { - return nil - } + req := NewMetadataRequest(client.conf.Version, topics) + req.AllowAutoTopicCreation = allowAutoTopicCreation + atomic.StoreInt64(&client.updateMetadataMs, time.Now().UnixMilli()) response, err := broker.GetMetadata(req) var kerror KError var packetEncodingError PacketEncodingError if err == nil { + // When talking to the startup phase of a broker, it is possible to receive an empty metadata set. We should remove that broker and try next broker (https://issues.apache.org/jira/browse/KAFKA-7924). 
+ if len(response.Brokers) == 0 { + Logger.Println("client/metadata receiving empty brokers from the metadata response when requesting the broker #%d at %s", broker.ID(), broker.addr) + _ = broker.Close() + client.deregisterBroker(broker) + continue + } allKnownMetaData := len(topics) == 0 // valid response, use it shouldRetry, err := client.updateMetadata(response, allKnownMetaData) @@ -1157,24 +1194,30 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { backoff := client.computeBackoff(attemptsRemaining) + attemptsRemaining-- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) time.Sleep(backoff) - return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining-1) + return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining) } return nil, err } brokerErrors := make([]error, 0) - for broker := client.anyBroker(); broker != nil; broker = client.anyBroker() { + for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() { DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr()) request := new(FindCoordinatorRequest) request.CoordinatorKey = coordinatorKey request.CoordinatorType = coordinatorType + // Version 1 adds KeyType. if client.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 } + // Version 2 is the same as version 1. + if client.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } response, err := broker.FindCoordinator(request) if err != nil { @@ -1225,6 +1268,53 @@ func (client *client) findCoordinator(coordinatorKey string, coordinatorType Coo return retry(Wrap(ErrOutOfBrokers, brokerErrors...)) } +func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) { + ctx := context.Background() + + dialer := client.Config().getDialer() + resolver := net.Resolver{ + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + // dial func should only be called once, so switching within is acceptable + switch d := dialer.(type) { + case proxy.ContextDialer: + return d.DialContext(ctx, network, address) + default: + // we have no choice but to ignore the context + return d.Dial(network, address) + } + }, + } + + canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go + for _, addr := range addrs { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return nil, err // message includes addr + } + + ips, err := resolver.LookupHost(ctx, host) + if err != nil { + return nil, err // message includes host + } + for _, ip := range ips { + ptrs, err := resolver.LookupAddr(ctx, ip) + if err != nil { + return nil, err // message includes ip + } + + // unlike the Java client, we do not further check that PTRs resolve + ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI + canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{} + } + } + + addrs = make([]string, 0, len(canonicalAddrs)) + for addr := range canonicalAddrs { + addrs = append(addrs, addr) + } + return addrs, nil +} + // nopCloserClient embeds an existing Client, but disables // the Close method (yet all other methods pass // through unchanged). 
This is for use in larger structs diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go similarity index 99% rename from vendor/github.com/Shopify/sarama/compress.go rename to vendor/github.com/IBM/sarama/compress.go index 504007a49b..a7bd525bc7 100644 --- a/vendor/github.com/Shopify/sarama/compress.go +++ b/vendor/github.com/IBM/sarama/compress.go @@ -2,11 +2,11 @@ package sarama import ( "bytes" - "compress/gzip" "fmt" "sync" snappy "github.com/eapache/go-xerial-snappy" + "github.com/klauspost/compress/gzip" "github.com/pierrec/lz4/v4" ) diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/IBM/sarama/config.go similarity index 95% rename from vendor/github.com/Shopify/sarama/config.go rename to vendor/github.com/IBM/sarama/config.go index b07034434c..ad970a3f08 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -1,7 +1,6 @@ package sarama import ( - "compress/gzip" "crypto/tls" "fmt" "io" @@ -9,13 +8,16 @@ import ( "regexp" "time" + "github.com/klauspost/compress/gzip" "github.com/rcrowley/go-metrics" "golang.org/x/net/proxy" ) const defaultClientID = "sarama" -var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) +// validClientID specifies the permitted characters for a client.id when +// connecting to Kafka versions before 1.0.0 (KIP-190) +var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. type Config struct { @@ -50,6 +52,15 @@ type Config struct { ReadTimeout time.Duration // How long to wait for a response. WriteTimeout time.Duration // How long to wait for a transmit. + // ResolveCanonicalBootstrapServers turns each bootstrap broker address + // into a set of IPs, then does a reverse lookup on each one to get its + // canonical hostname. This list of hostnames then replaces the + // original address list. Similar to the `client.dns.lookup` option in + // the JVM client, this is especially useful with GSSAPI, where it + // allows providing an alias record instead of individual broker + // hostnames. Defaults to false. + ResolveCanonicalBootstrapServers bool + TLS struct { // Whether or not to use TLS when connecting to the broker // (defaults to false). @@ -272,7 +283,6 @@ type Config struct { // Consumer is the namespace for configuration related to consuming messages, // used by the Consumer. Consumer struct { - // Group is the namespace for configuring consumer group. Group struct { Session struct { @@ -294,7 +304,7 @@ type Config struct { Interval time.Duration } Rebalance struct { - // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + // Strategy for allocating topic partitions to members. // Deprecated: Strategy exists for historical compatibility // and should not be used. Please use GroupStrategies. Strategy BalanceStrategy @@ -302,7 +312,7 @@ type Config struct { // GroupStrategies is the priority-ordered list of client-side consumer group // balancing strategies that will be offered to the coordinator. The first // strategy that all group members support will be chosen by the leader. - // default: [BalanceStrategyRange] + // default: [ NewBalanceStrategyRange() ] GroupStrategies []BalanceStrategy // The maximum allowed time for each worker to join the group once a rebalance has begun. 
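Note: a minimal usage sketch of the consumer configuration surface touched in the config.go hunks above, assuming the github.com/IBM/sarama import path introduced by this bump; the bootstrap address, group strategy choice, and protocol version below are illustrative placeholders, not values taken from this repository.

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // example version for this sketch

	// Rebalance.Strategy is deprecated; the priority-ordered GroupStrategies
	// list replaces it and now defaults to NewBalanceStrategyRange().
	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
		sarama.NewBalanceStrategyRange(),
	}

	// Optional new knob: resolve bootstrap aliases to canonical broker
	// hostnames, similar to the JVM client's client.dns.lookup behaviour
	// (mainly useful with GSSAPI).
	cfg.Net.ResolveCanonicalBootstrapServers = true

	client, err := sarama.NewClient([]string{"kafka-bootstrap.example.com:9092"}, cfg) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}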
@@ -505,7 +515,7 @@ func NewConfig() *Config { c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second c.Net.SASL.Handshake = true - c.Net.SASL.Version = SASLHandshakeV0 + c.Net.SASL.Version = SASLHandshakeV1 c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond @@ -539,7 +549,7 @@ func NewConfig() *Config { c.Consumer.Group.Session.Timeout = 10 * time.Second c.Consumer.Group.Heartbeat.Interval = 3 * time.Second - c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{BalanceStrategyRange} + c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()} c.Consumer.Group.Rebalance.Timeout = 60 * time.Second c.Consumer.Group.Rebalance.Retry.Max = 4 c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second @@ -650,19 +660,26 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") } - if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + switch c.Net.SASL.GSSAPI.AuthType { + case KRB5_USER_AUTH: if c.Net.SASL.GSSAPI.Password == "" { return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") } - } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + case KRB5_KEYTAB_AUTH: if c.Net.SASL.GSSAPI.KeyTabPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + - " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + } + case KRB5_CCACHE_AUTH: + if c.Net.SASL.GSSAPI.CCachePath == "" { + return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH") } - } else { - return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + default: + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. 
Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH") } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") } @@ -831,8 +848,11 @@ func (c *Config) Validate() error { switch { case c.ChannelBufferSize < 0: return ConfigurationError("ChannelBufferSize must be >= 0") - case !validID.MatchString(c.ClientID): - return ConfigurationError("ClientID is invalid") + } + + // only validate clientID locally for Kafka versions before KIP-190 was implemented + if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) { + return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", c.ClientID)) } return nil @@ -840,7 +860,7 @@ func (c *Config) Validate() error { func (c *Config) getDialer() proxy.Dialer { if c.Net.Proxy.Enable { - Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + Logger.Println("using proxy") return c.Net.Proxy.Dialer } else { return &net.Dialer{ diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go similarity index 100% rename from vendor/github.com/Shopify/sarama/config_resource_type.go rename to vendor/github.com/IBM/sarama/config_resource_type.go diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go similarity index 93% rename from vendor/github.com/Shopify/sarama/consumer.go rename to vendor/github.com/IBM/sarama/consumer.go index d635dfe81d..60556a566d 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/IBM/sarama/consumer.go @@ -85,13 +85,13 @@ type Consumer interface { // New calls to the broker will return records from these partitions if there are any to be fetched. Resume(topicPartitions map[string][]int32) - // Pause suspends fetching from all partitions. Future calls to the broker will not return any + // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any // records from these partitions until they have been resumed using Resume()/ResumeAll(). // Note that this method does not affect partition subscription. // In particular, it does not cause a group rebalance when automatic assignment is used. PauseAll() - // Resume resumes all partitions which have been paused with Pause()/PauseAll(). + // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll(). // New calls to the broker will return records from these partitions if there are any to be fetched. 
ResumeAll() } @@ -165,6 +165,7 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), feeder: make(chan *FetchResponse, 1), + leaderEpoch: invalidLeaderEpoch, preferredReadReplica: invalidPreferredReplicaID, trigger: make(chan none, 1), dying: make(chan none), @@ -175,9 +176,8 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) return nil, err } - var leader *Broker - var err error - if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition) + if err != nil { return nil, err } @@ -188,6 +188,7 @@ func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) go withRecover(child.dispatcher) go withRecover(child.responseFeeder) + child.leaderEpoch = epoch child.broker = c.refBrokerConsumer(leader) child.broker.input <- child @@ -400,6 +401,7 @@ type partitionConsumer struct { errors chan *ConsumerError feeder chan *FetchResponse + leaderEpoch int32 preferredReadReplica int32 trigger, dying chan none @@ -463,11 +465,11 @@ func (child *partitionConsumer) dispatcher() { close(child.feeder) } -func (child *partitionConsumer) preferredBroker() (*Broker, error) { +func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) { if child.preferredReadReplica >= 0 { broker, err := child.consumer.client.Broker(child.preferredReadReplica) if err == nil { - return broker, nil + return broker, child.leaderEpoch, nil } Logger.Printf( "consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader", @@ -480,7 +482,7 @@ func (child *partitionConsumer) preferredBroker() (*Broker, error) { } // if preferred replica cannot be found fallback to leader - return child.consumer.client.Leader(child.topic, child.partition) + return child.consumer.client.LeaderAndEpoch(child.topic, child.partition) } func (child *partitionConsumer) dispatch() error { @@ -488,13 +490,13 @@ func (child *partitionConsumer) dispatch() error { return err } - broker, err := child.preferredBroker() + broker, epoch, err := child.preferredBroker() if err != nil { return err } + child.leaderEpoch = epoch child.broker = child.consumer.refBrokerConsumer(broker) - child.broker.input <- child return nil @@ -918,7 +920,7 @@ func (bc *brokerConsumer) subscriptionManager() { } // subscriptionConsumer ensures we will get nil right away if no new subscriptions is available -// this is a the main loop that fetches Kafka messages +// this is the main loop that fetches Kafka messages func (bc *brokerConsumer) subscriptionConsumer() { for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) @@ -940,6 +942,7 @@ func (bc *brokerConsumer) subscriptionConsumer() { // if there isn't response, it means that not fetch was made // so we don't need to handle any response if response == nil { + time.Sleep(partitionConsumersBatchTimeout) continue } @@ -987,7 +990,7 @@ func (bc *brokerConsumer) handleResponses() { child.responseResult = nil if result == nil { - if preferredBroker, err := child.preferredBroker(); err == nil { + if preferredBroker, _, err := child.preferredBroker(); err == nil { if bc.broker.ID() != preferredBroker.ID() { // not an error but needs redispatching to consume from preferred replica Logger.Printf( @@ -1014,7 +1017,12 @@ func (bc *brokerConsumer) 
handleResponses() { Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) close(child.trigger) delete(bc.subscriptions, child) - } else if errors.Is(result, ErrUnknownTopicOrPartition) || errors.Is(result, ErrNotLeaderForPartition) || errors.Is(result, ErrLeaderNotAvailable) || errors.Is(result, ErrReplicaNotAvailable) { + } else if errors.Is(result, ErrUnknownTopicOrPartition) || + errors.Is(result, ErrNotLeaderForPartition) || + errors.Is(result, ErrLeaderNotAvailable) || + errors.Is(result, ErrReplicaNotAvailable) || + errors.Is(result, ErrFencedLeaderEpoch) || + errors.Is(result, ErrUnknownLeaderEpoch) { // not an error, but does need redispatching Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", bc.broker.ID(), child.topic, child.partition, result) @@ -1060,20 +1068,35 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + // Version 1 is the same as version 0. if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { request.Version = 1 } + // Starting in Version 2, the requestor must be able to handle Kafka Log + // Message format version 1. if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } + // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in + // the request is now relevant. Partitions will be processed in the order + // they appear in the request. if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { request.Version = 3 request.MaxBytes = MaxResponseSize } + // Version 4 adds IsolationLevel. Starting in version 4, the reqestor must be + // able to handle Kafka log message format version 2. + // Version 5 adds LogStartOffset to indicate the earliest available offset of + // partition data that can be consumed. if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { - request.Version = 4 + request.Version = 5 request.Isolation = bc.consumer.conf.Consumer.IsolationLevel } + // Version 6 is the same as version 5. + if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 6 + } + // Version 7 adds incremental fetch request support. if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { request.Version = 7 // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 @@ -1082,9 +1105,17 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { request.SessionID = 0 request.SessionEpoch = -1 } + // Version 8 is the same as version 7. + if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 8 + } + // Version 9 adds CurrentLeaderEpoch, as described in KIP-320. + // Version 10 indicates that we can use the ZStd compression algorithm, as + // described in KIP-110. 
if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { request.Version = 10 } + // Version 11 adds RackID for KIP-392 fetch from closest replica if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { request.Version = 11 request.RackID = bc.consumer.conf.RackID @@ -1092,7 +1123,7 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { for child := range bc.subscriptions { if !child.IsPaused() { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch) } } diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go similarity index 85% rename from vendor/github.com/Shopify/sarama/consumer_group.go rename to vendor/github.com/IBM/sarama/consumer_group.go index 4f0fd66606..53b64dd3b8 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group.go +++ b/vendor/github.com/IBM/sarama/consumer_group.go @@ -114,6 +114,9 @@ func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerG // necessary to call Close() on the underlying client when shutting down this consumer. // PLEASE NOTE: consumer groups can only re-use but not share clients. func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + if client == nil { + return nil, ConfigurationError("client must not be nil") + } // For clients passed in by the client, ensure we don't // call Close() on it. cli := &nopCloserClient{client} @@ -126,7 +129,7 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") } - consumer, err := NewConsumerFromClient(client) + consumer, err := newConsumer(client) if err != nil { return nil, err } @@ -141,8 +144,8 @@ func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { userData: config.Consumer.Group.Member.UserData, metricRegistry: newCleanupRegistry(config.MetricRegistry), } - if client.Config().Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { - cg.groupInstanceId = &client.Config().Consumer.Group.InstanceId + if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) { + cg.groupInstanceId = &config.Consumer.Group.InstanceId } return cg, nil } @@ -210,13 +213,11 @@ func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler Co return err } - // loop check topic partition numbers changed - // will trigger rebalance when any topic partitions number had changed - // avoid Consume function called again that will generate more than loopCheckPartitionNumbers coroutine - go c.loopCheckPartitionNumbers(topics, sess) - - // Wait for session exit signal - <-sess.ctx.Done() + // Wait for session exit signal or Close() call + select { + case <-c.closed: + case <-sess.ctx.Done(): + } // Gracefully release session claims return sess.release(true) @@ -244,6 +245,8 @@ func (c *consumerGroup) ResumeAll() { func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { select { + case <-ctx.Done(): + return nil, ctx.Err() case <-c.closed: return nil, ErrClosedConsumerGroup case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): @@ -252,7 +255,10 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha if refreshCoordinator { err := 
c.client.RefreshCoordinator(c.groupID) if err != nil { - return c.retryNewSession(ctx, topics, handler, retries, true) + if retries <= 0 { + return nil, err + } + return c.retryNewSession(ctx, topics, handler, retries-1, true) } } @@ -260,6 +266,9 @@ func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, ha } func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } coordinator, err := c.client.Coordinator(c.groupID) if err != nil { if retries <= 0 { @@ -315,10 +324,12 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } return c.retryNewSession(ctx, topics, handler, retries, true) case ErrMemberIdRequired: - // from JoinGroupRequest v4, if client start with empty member id, - // it need to get member id from response and send another join request to join group + // from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts + // with an empty member id, it needs to get the assigned id from the + // response and send another join request with that id to actually join the + // group c.memberID = join.MemberId - return c.retryNewSession(ctx, topics, handler, retries+1 /*keep retry time*/, false) + return c.newSession(ctx, topics, handler, retries) case ErrFencedInstancedId: if c.groupInstanceId != nil { Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId) @@ -342,13 +353,15 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler // Prepare distribution plan if we joined as the leader var plan BalanceStrategyPlan var members map[string]ConsumerGroupMemberMetadata + var allSubscribedTopicPartitions map[string][]int32 + var allSubscribedTopics []string if join.LeaderId == join.MemberId { members, err = join.GetMembers() if err != nil { return nil, err } - plan, err = c.balance(strategy, members) + allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members) if err != nil { return nil, err } @@ -403,7 +416,7 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler claims = members.Topics // in the case of stateful balance strategies, hold on to the returned - // assignment metadata, otherwise, reset the statically defined conusmer + // assignment metadata, otherwise, reset the statically defined consumer // group metadata if members.UserData != nil { c.userData = members.UserData @@ -416,7 +429,17 @@ func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler } } - return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) + session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) + if err != nil { + return nil, err + } + + // only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance + if join.LeaderId == join.MemberId { + go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session) + } + + return session, err } func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { @@ -430,7 +453,23 @@ func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) ( req.Version = 1 req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) } - if c.groupInstanceId != nil { + if 
c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 2 + } + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 2 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 3 + } + // from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually + // send two JoinGroupRequests, once with the empty member id, and then again + // with the assigned id from the first response. This is handled via the + // ErrMemberIdRequired case. + if c.config.Version.IsAtLeast(V2_2_0_0) { + req.Version = 4 + } + if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 5 req.GroupInstanceId = c.groupInstanceId } @@ -479,12 +518,19 @@ func (c *consumerGroup) syncGroupRequest( GenerationId: generationID, } + // Versions 1 and 2 are the same as version 0. + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 - } - if c.groupInstanceId != nil { req.GroupInstanceId = c.groupInstanceId } + for memberID, topics := range plan { assignment := &ConsumerGroupMemberAssignment{Topics: topics} userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) @@ -513,7 +559,16 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g MemberId: memberID, GenerationId: generationID, } - if c.groupInstanceId != nil { + + // Version 1 and version 2 are the same as version 0. + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts. + if c.config.Version.IsAtLeast(V2_3_0_0) { req.Version = 3 req.GroupInstanceId = c.groupInstanceId } @@ -521,23 +576,36 @@ func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, g return coordinator.Heartbeat(req) } -func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { - topics := make(map[string][]int32) +func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) { + topicPartitions := make(map[string][]int32) for _, meta := range members { for _, topic := range meta.Topics { - topics[topic] = nil + topicPartitions[topic] = nil } } - for topic := range topics { + allSubscribedTopics := make([]string, 0, len(topicPartitions)) + for topic := range topicPartitions { + allSubscribedTopics = append(allSubscribedTopics, topic) + } + + // refresh metadata for all the subscribed topics in the consumer group + // to avoid using stale metadata to assigning partitions + err := c.client.RefreshMetadata(allSubscribedTopics...) + if err != nil { + return nil, nil, nil, err + } + + for topic := range topicPartitions { partitions, err := c.client.Partitions(topic) if err != nil { - return nil, err + return nil, nil, nil, err } - topics[topic] = partitions + topicPartitions[topic] = partitions } - return strategy.Plan(members, topics) + plan, err := strategy.Plan(members, topicPartitions) + return topicPartitions, allSubscribedTopics, plan, err } // Leaves the cluster, called by Close. 
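Note: the balance() refactor above has the group leader refresh metadata for every subscribed topic and then hand the member metadata plus a topic-to-partitions map to the configured strategy. A small standalone sketch of that Plan call, with hypothetical member IDs, topic name, and partition list standing in for the values the leader gathers at runtime:

package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// Hypothetical inputs, mirroring what the leader collects via JoinGroup
	// and RefreshMetadata before building the assignment plan.
	members := map[string]sarama.ConsumerGroupMemberMetadata{
		"member-1": {Topics: []string{"orders"}},
		"member-2": {Topics: []string{"orders"}},
	}
	topicPartitions := map[string][]int32{
		"orders": {0, 1, 2, 3},
	}

	strategy := sarama.NewBalanceStrategyRange()
	plan, err := strategy.Plan(members, topicPartitions)
	if err != nil {
		log.Fatal(err)
	}
	for memberID, assignment := range plan {
		fmt.Println(memberID, assignment) // e.g. member-1 map[orders:[0 1]]
	}
}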
@@ -553,32 +621,43 @@ func (c *consumerGroup) leave() error { return err } - // KIP-345 if groupInstanceId is set, don not leave group when consumer closed. - // Since we do not discover ApiVersion for brokers, LeaveGroupRequest still use the old version request for now - if c.groupInstanceId == nil { - resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ - GroupId: c.groupID, + // as per KIP-345 if groupInstanceId is set, i.e. static membership is in action, then do not leave group when consumer closed, just clear memberID + if c.groupInstanceId != nil { + c.memberID = "" + return nil + } + req := &LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + } + if c.config.Version.IsAtLeast(V0_11_0_0) { + req.Version = 1 + } + if c.config.Version.IsAtLeast(V2_0_0_0) { + req.Version = 2 + } + if c.config.Version.IsAtLeast(V2_4_0_0) { + req.Version = 3 + req.Members = append(req.Members, MemberIdentity{ MemberId: c.memberID, }) - if err != nil { - _ = coordinator.Close() - return err - } + } - // Unset memberID - c.memberID = "" + resp, err := coordinator.LeaveGroup(req) + if err != nil { + _ = coordinator.Close() + return err + } - // Check response - switch resp.Err { - case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: - return nil - default: - return resp.Err - } - } else { - c.memberID = "" + // clear the memberID + c.memberID = "" + + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err } - return nil } func (c *consumerGroup) handleError(err error, topic string, partition int32) { @@ -612,24 +691,29 @@ func (c *consumerGroup) handleError(err error, topic string, partition int32) { } } -func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { +func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) { if c.config.Metadata.RefreshFrequency == time.Duration(0) { return } - pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer session.cancel() - defer pause.Stop() - var oldTopicToPartitionNum map[string]int - var err error - if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil { - return + + oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions)) + for topic, partitions := range allSubscribedTopicPartitions { + oldTopicToPartitionNum[topic] = len(partitions) } + + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer pause.Stop() for { if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { return } else { for topic, num := range oldTopicToPartitionNum { if newTopicToPartitionNum[topic] != num { + Logger.Printf( + "consumergroup/%s loop check partition number goroutine find partitions in topics %s changed from %d to %d\n", + c.groupID, topics, num, newTopicToPartitionNum[topic]) return // trigger the end of the session on exit } } @@ -638,7 +722,7 @@ func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *cons case <-pause.C: case <-session.ctx.Done(): Logger.Printf( - "consumergroup/%s loop check partition number coroutine will exit, topics %s\n", + "consumergroup/%s loop check partition number goroutine will exit, topics %s\n", c.groupID, topics) // if session closed by other, should be exited return @@ -1013,7 +1097,7 @@ type ConsumerGroupClaim interface { // InitialOffset returns the initial offset that was used as a starting point for this 
claim. InitialOffset() int64 - // HighWaterMarkOffset returns the high water mark offset of the partition, + // HighWaterMarkOffset returns the high watermark offset of the partition, // i.e. the offset that will be used for the next message that will be produced. // You can use this to determine how far behind the processing is. HighWaterMarkOffset() int64 diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go similarity index 73% rename from vendor/github.com/Shopify/sarama/consumer_group_members.go rename to vendor/github.com/IBM/sarama/consumer_group_members.go index 3b8ca36f60..2d38960919 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/IBM/sarama/consumer_group_members.go @@ -9,6 +9,8 @@ type ConsumerGroupMemberMetadata struct { Topics []string UserData []byte OwnedPartitions []*OwnedPartition + GenerationID int32 + RackID *string } func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { @@ -22,6 +24,27 @@ func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { return err } + if m.Version >= 1 { + if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil { + return err + } + for _, op := range m.OwnedPartitions { + if err := op.encode(pe); err != nil { + return err + } + } + } + + if m.Version >= 2 { + pe.putInt32(m.GenerationID) + } + + if m.Version >= 3 { + if err := pe.putNullableString(m.RackID); err != nil { + return err + } + } + return nil } @@ -48,18 +71,29 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { } return err } - if n == 0 { - return nil - } - m.OwnedPartitions = make([]*OwnedPartition, n) - for i := 0; i < n; i++ { - m.OwnedPartitions[i] = &OwnedPartition{} - if err := m.OwnedPartitions[i].decode(pd); err != nil { - return err + if n > 0 { + m.OwnedPartitions = make([]*OwnedPartition, n) + for i := 0; i < n; i++ { + m.OwnedPartitions[i] = &OwnedPartition{} + if err := m.OwnedPartitions[i].decode(pd); err != nil { + return err + } } } } + if m.Version >= 2 { + if m.GenerationID, err = pd.getInt32(); err != nil { + return err + } + } + + if m.Version >= 3 { + if m.RackID, err = pd.getNullableString(); err != nil { + return err + } + } + return nil } @@ -68,6 +102,16 @@ type OwnedPartition struct { Partitions []int32 } +func (m *OwnedPartition) encode(pe packetEncoder) error { + if err := pe.putString(m.Topic); err != nil { + return err + } + if err := pe.putInt32Array(m.Partitions); err != nil { + return err + } + return nil +} + func (m *OwnedPartition) decode(pd packetDecoder) (err error) { if m.Topic, err = pd.getString(); err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go similarity index 75% rename from vendor/github.com/Shopify/sarama/consumer_metadata_request.go rename to vendor/github.com/IBM/sarama/consumer_metadata_request.go index 5c18e048a7..ef6b9e7217 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/IBM/sarama/consumer_metadata_request.go @@ -2,6 +2,7 @@ package sarama // ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { + Version int16 ConsumerGroup string } @@ -9,6 +10,7 @@ func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { tmp := new(FindCoordinatorRequest) tmp.CoordinatorKey = r.ConsumerGroup tmp.CoordinatorType = CoordinatorGroup + tmp.Version = 
r.Version return tmp.encode(pe) } @@ -26,13 +28,24 @@ func (r *ConsumerMetadataRequest) key() int16 { } func (r *ConsumerMetadataRequest) version() int16 { - return 0 + return r.Version } func (r *ConsumerMetadataRequest) headerVersion() int16 { return 1 } +func (r *ConsumerMetadataRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { - return V0_8_2_0 + switch r.Version { + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/consumer_metadata_response.go rename to vendor/github.com/IBM/sarama/consumer_metadata_response.go index 7fe0cf9716..d99209e3b6 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/IBM/sarama/consumer_metadata_response.go @@ -7,6 +7,7 @@ import ( // ConsumerMetadataResponse holds the response for a consumer group meta data requests type ConsumerMetadataResponse struct { + Version int16 Err KError Coordinator *Broker CoordinatorID int32 // deprecated: use Coordinator.ID() @@ -53,7 +54,7 @@ func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { } tmp := &FindCoordinatorResponse{ - Version: 0, + Version: r.Version, Err: r.Err, Coordinator: r.Coordinator, } @@ -70,13 +71,24 @@ func (r *ConsumerMetadataResponse) key() int16 { } func (r *ConsumerMetadataResponse) version() int16 { - return 0 + return r.Version } func (r *ConsumerMetadataResponse) headerVersion() int16 { return 0 } +func (r *ConsumerMetadataResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { - return V0_8_2_0 + switch r.Version { + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } } diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/control_record.go rename to vendor/github.com/IBM/sarama/control_record.go diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/crc32_field.go rename to vendor/github.com/IBM/sarama/crc32_field.go diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/create_partitions_request.go rename to vendor/github.com/IBM/sarama/create_partitions_request.go index 46fb044024..3f5512656b 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_request.go +++ b/vendor/github.com/IBM/sarama/create_partitions_request.go @@ -3,6 +3,7 @@ package sarama import "time" type CreatePartitionsRequest struct { + Version int16 TopicPartitions map[string]*TopicPartition Timeout time.Duration ValidateOnly bool @@ -64,15 +65,26 @@ func (r *CreatePartitionsRequest) key() int16 { } func (r *CreatePartitionsRequest) version() int16 { - return 0 + return r.Version } func (r *CreatePartitionsRequest) headerVersion() int16 { return 1 } +func (r *CreatePartitionsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *CreatePartitionsRequest) requiredVersion() 
KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_0_0_0 + default: + return V2_0_0_0 + } } type TopicPartition struct { diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/create_partitions_response.go rename to vendor/github.com/IBM/sarama/create_partitions_response.go index 235787f133..c9e7ea72cd 100644 --- a/vendor/github.com/Shopify/sarama/create_partitions_response.go +++ b/vendor/github.com/IBM/sarama/create_partitions_response.go @@ -6,6 +6,7 @@ import ( ) type CreatePartitionsResponse struct { + Version int16 ThrottleTime time.Duration TopicPartitionErrors map[string]*TopicPartitionError } @@ -60,15 +61,30 @@ func (r *CreatePartitionsResponse) key() int16 { } func (r *CreatePartitionsResponse) version() int16 { - return 0 + return r.Version } func (r *CreatePartitionsResponse) headerVersion() int16 { return 0 } +func (r *CreatePartitionsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { - return V1_0_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_0_0_0 + default: + return V2_0_0_0 + } +} + +func (r *CreatePartitionsResponse) throttleTime() time.Duration { + return r.ThrottleTime } type TopicPartitionError struct { diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go similarity index 74% rename from vendor/github.com/Shopify/sarama/create_topics_request.go rename to vendor/github.com/IBM/sarama/create_topics_request.go index 287acd069b..8382d17c20 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_request.go +++ b/vendor/github.com/IBM/sarama/create_topics_request.go @@ -5,10 +5,14 @@ import ( ) type CreateTopicsRequest struct { + // Version defines the protocol version to use for encode and decode Version int16 - + // TopicDetails contains the topics to create. TopicDetails map[string]*TopicDetail - Timeout time.Duration + // Timeout contains how long to wait before timing out the request. + Timeout time.Duration + // ValidateOnly if true, check that the topics can be created as specified, + // but don't create anything. ValidateOnly bool } @@ -83,22 +87,39 @@ func (r *CreateTopicsRequest) headerVersion() int16 { return 1 } +func (c *CreateTopicsRequest) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 3 +} + func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { + case 3: + return V2_0_0_0 case 2: - return V1_0_0_0 - case 1: return V0_11_0_0 - default: + case 1: + return V0_10_2_0 + case 0: return V0_10_1_0 + default: + return V2_8_0_0 } } type TopicDetail struct { - NumPartitions int32 + // NumPartitions contains the number of partitions to create in the topic, or + // -1 if we are either specifying a manual partition assignment or using the + // default partitions. + NumPartitions int32 + // ReplicationFactor contains the number of replicas to create for each + // partition in the topic, or -1 if we are either specifying a manual + // partition assignment or using the default replication factor. ReplicationFactor int16 + // ReplicaAssignment contains the manual partition assignment, or the empty + // array if we are using automatic assignment. 
ReplicaAssignment map[int32][]int32 - ConfigEntries map[string]*string + // ConfigEntries contains the custom topic configurations to set. + ConfigEntries map[string]*string } func (t *TopicDetail) encode(pe packetEncoder) error { diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go similarity index 78% rename from vendor/github.com/Shopify/sarama/create_topics_response.go rename to vendor/github.com/IBM/sarama/create_topics_response.go index 6b940bff06..85bd4c0b93 100644 --- a/vendor/github.com/Shopify/sarama/create_topics_response.go +++ b/vendor/github.com/IBM/sarama/create_topics_response.go @@ -6,9 +6,13 @@ import ( ) type CreateTopicsResponse struct { - Version int16 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration for which the request was throttled due + // to a quota violation, or zero if the request did not violate any quota. ThrottleTime time.Duration - TopicErrors map[string]*TopicError + // TopicErrors contains a map of any errors for the topics we tried to create. + TopicErrors map[string]*TopicError } func (c *CreateTopicsResponse) encode(pe packetEncoder) error { @@ -74,17 +78,29 @@ func (c *CreateTopicsResponse) headerVersion() int16 { return 0 } +func (c *CreateTopicsResponse) isValidVersion() bool { + return c.Version >= 0 && c.Version <= 3 +} + func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { + case 3: + return V2_0_0_0 case 2: - return V1_0_0_0 - case 1: return V0_11_0_0 - default: + case 1: + return V0_10_2_0 + case 0: return V0_10_1_0 + default: + return V2_8_0_0 } } +func (r *CreateTopicsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + type TopicError struct { Err KError ErrMsg *string diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go new file mode 100644 index 0000000000..0a09983294 --- /dev/null +++ b/vendor/github.com/IBM/sarama/decompress.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "bytes" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/klauspost/compress/gzip" + "github.com/pierrec/lz4/v4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool + + bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + bytesPool = sync.Pool{ + New: func() interface{} { + res := make([]byte, 0, 4096) + return &res + }, + } +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var err error + reader, ok := gzipReaderPool.Get().(*gzip.Reader) + if !ok { + reader, err = gzip.NewReader(bytes.NewReader(data)) + } else { + err = reader.Reset(bytes.NewReader(data)) + } + + if err != nil { + return nil, err + } + + buffer := bufferPool.Get().(*bytes.Buffer) + _, err = buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse gzipReader and buffer + gzipReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader, ok := lz4ReaderPool.Get().(*lz4.Reader) + if !ok { + reader = lz4.NewReader(bytes.NewReader(data)) + } else { + reader.Reset(bytes.NewReader(data)) + } + buffer 
:= bufferPool.Get().(*bytes.Buffer) + _, err := buffer.ReadFrom(reader) + // copy the buffer to a new slice with the correct length + // reuse lz4Reader and buffer + lz4ReaderPool.Put(reader) + res := make([]byte, buffer.Len()) + copy(res, buffer.Bytes()) + buffer.Reset() + bufferPool.Put(buffer) + + return res, err + case CompressionZSTD: + buffer := *bytesPool.Get().(*[]byte) + var err error + buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data) + // copy the buffer to a new slice with the correct length and reuse buffer + res := make([]byte, len(buffer)) + copy(res, buffer) + buffer = buffer[:0] + bytesPool.Put(&buffer) + + return res, err + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go similarity index 71% rename from vendor/github.com/Shopify/sarama/delete_groups_request.go rename to vendor/github.com/IBM/sarama/delete_groups_request.go index 4ac8bbee4c..2fdfc33869 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_request.go +++ b/vendor/github.com/IBM/sarama/delete_groups_request.go @@ -1,7 +1,8 @@ package sarama type DeleteGroupsRequest struct { - Groups []string + Version int16 + Groups []string } func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { @@ -18,15 +19,26 @@ func (r *DeleteGroupsRequest) key() int16 { } func (r *DeleteGroupsRequest) version() int16 { - return 0 + return r.Version } func (r *DeleteGroupsRequest) headerVersion() int16 { return 1 } +func (r *DeleteGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { - return V1_1_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_1_0_0 + default: + return V2_0_0_0 + } } func (r *DeleteGroupsRequest) AddGroup(group string) { diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go similarity index 80% rename from vendor/github.com/Shopify/sarama/delete_groups_response.go rename to vendor/github.com/IBM/sarama/delete_groups_response.go index 5e7b1ed368..e490f83146 100644 --- a/vendor/github.com/Shopify/sarama/delete_groups_response.go +++ b/vendor/github.com/IBM/sarama/delete_groups_response.go @@ -5,6 +5,7 @@ import ( ) type DeleteGroupsResponse struct { + Version int16 ThrottleTime time.Duration GroupErrorCodes map[string]KError } @@ -62,13 +63,28 @@ func (r *DeleteGroupsResponse) key() int16 { } func (r *DeleteGroupsResponse) version() int16 { - return 0 + return r.Version } func (r *DeleteGroupsResponse) headerVersion() int16 { return 0 } +func (r *DeleteGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { - return V1_1_0_0 + switch r.Version { + case 1: + return V2_0_0_0 + case 0: + return V1_1_0_0 + default: + return V2_0_0_0 + } +} + +func (r *DeleteGroupsResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/delete_offsets_request.go rename to vendor/github.com/IBM/sarama/delete_offsets_request.go index 339c7857ca..06b864d18f 100644 --- a/vendor/github.com/Shopify/sarama/delete_offsets_request.go +++ 
b/vendor/github.com/IBM/sarama/delete_offsets_request.go @@ -1,6 +1,7 @@ package sarama type DeleteOffsetsRequest struct { + Version int16 Group string partitions map[string][]int32 } @@ -72,13 +73,17 @@ func (r *DeleteOffsetsRequest) key() int16 { } func (r *DeleteOffsetsRequest) version() int16 { - return 0 + return r.Version } func (r *DeleteOffsetsRequest) headerVersion() int16 { return 1 } +func (r *DeleteOffsetsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/delete_offsets_response.go rename to vendor/github.com/IBM/sarama/delete_offsets_response.go index d59ae0f8c1..86c6c51f68 100644 --- a/vendor/github.com/Shopify/sarama/delete_offsets_response.go +++ b/vendor/github.com/IBM/sarama/delete_offsets_response.go @@ -5,6 +5,7 @@ import ( ) type DeleteOffsetsResponse struct { + Version int16 // The top-level error code, or 0 if there was no error. ErrorCode KError ThrottleTime time.Duration @@ -100,13 +101,21 @@ func (r *DeleteOffsetsResponse) key() int16 { } func (r *DeleteOffsetsResponse) version() int16 { - return 0 + return r.Version } func (r *DeleteOffsetsResponse) headerVersion() int16 { return 0 } +func (r *DeleteOffsetsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *DeleteOffsetsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/delete_records_request.go rename to vendor/github.com/IBM/sarama/delete_records_request.go index dc106b17d6..3ca2146afb 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_request.go +++ b/vendor/github.com/IBM/sarama/delete_records_request.go @@ -13,6 +13,7 @@ import ( // id(int32) offset(int64) type DeleteRecordsRequest struct { + Version int16 Topics map[string]*DeleteRecordsRequestTopic Timeout time.Duration } @@ -74,15 +75,24 @@ func (d *DeleteRecordsRequest) key() int16 { } func (d *DeleteRecordsRequest) version() int16 { - return 0 + return d.Version } func (d *DeleteRecordsRequest) headerVersion() int16 { return 1 } +func (d *DeleteRecordsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } type DeleteRecordsRequestTopic struct { diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/delete_records_response.go rename to vendor/github.com/IBM/sarama/delete_records_response.go index d530b4c7e9..2d7db885b1 100644 --- a/vendor/github.com/Shopify/sarama/delete_records_response.go +++ b/vendor/github.com/IBM/sarama/delete_records_response.go @@ -77,15 +77,28 @@ func (d *DeleteRecordsResponse) key() int16 { } func (d *DeleteRecordsResponse) version() int16 { - return 0 + return d.Version } func (d *DeleteRecordsResponse) headerVersion() int16 { return 0 } +func (d *DeleteRecordsResponse) isValidVersion() bool { 
+ return d.Version >= 0 && d.Version <= 1 +} + func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *DeleteRecordsResponse) throttleTime() time.Duration { + return r.ThrottleTime } type DeleteRecordsResponseTopic struct { diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go similarity index 84% rename from vendor/github.com/Shopify/sarama/delete_topics_request.go rename to vendor/github.com/IBM/sarama/delete_topics_request.go index ba6780a8e3..252c0d0259 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_request.go +++ b/vendor/github.com/IBM/sarama/delete_topics_request.go @@ -42,11 +42,21 @@ func (d *DeleteTopicsRequest) headerVersion() int16 { return 1 } +func (d *DeleteTopicsRequest) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 3 +} + func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { + case 3: + return V2_1_0_0 + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 - default: + case 0: return V0_10_1_0 + default: + return V2_2_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go similarity index 84% rename from vendor/github.com/Shopify/sarama/delete_topics_response.go rename to vendor/github.com/IBM/sarama/delete_topics_response.go index 733961a89a..556da68921 100644 --- a/vendor/github.com/Shopify/sarama/delete_topics_response.go +++ b/vendor/github.com/IBM/sarama/delete_topics_response.go @@ -72,11 +72,25 @@ func (d *DeleteTopicsResponse) headerVersion() int16 { return 0 } +func (d *DeleteTopicsResponse) isValidVersion() bool { + return d.Version >= 0 && d.Version <= 3 +} + func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { + case 3: + return V2_1_0_0 + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 - default: + case 0: return V0_10_1_0 + default: + return V2_2_0_0 } } + +func (r *DeleteTopicsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/describe_client_quotas_request.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_request.go index 17a82051c5..8869145c37 100644 --- a/vendor/github.com/Shopify/sarama/describe_client_quotas_request.go +++ b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go @@ -11,6 +11,7 @@ package sarama // Components: the components to filter on // Strict: whether the filter only includes specified components type DescribeClientQuotasRequest struct { + Version int16 Components []QuotaFilterComponent Strict bool } @@ -129,13 +130,17 @@ func (d *DescribeClientQuotasRequest) key() int16 { } func (d *DescribeClientQuotasRequest) version() int16 { - return 0 + return d.Version } func (d *DescribeClientQuotasRequest) headerVersion() int16 { return 1 } +func (d *DescribeClientQuotasRequest) isValidVersion() bool { + return d.Version == 0 +} + func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion { return V2_6_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go similarity index 95% rename from 
vendor/github.com/Shopify/sarama/describe_client_quotas_response.go rename to vendor/github.com/IBM/sarama/describe_client_quotas_response.go index 555da0c485..e9bf658adb 100644 --- a/vendor/github.com/Shopify/sarama/describe_client_quotas_response.go +++ b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go @@ -17,6 +17,7 @@ import ( // value => FLOAT64 type DescribeClientQuotasResponse struct { + Version int16 ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. ErrorCode KError // The error code, or `0` if the quota description succeeded. ErrorMsg *string // The error message, or `null` if the quota description succeeded. @@ -223,13 +224,21 @@ func (d *DescribeClientQuotasResponse) key() int16 { } func (d *DescribeClientQuotasResponse) version() int16 { - return 0 + return d.Version } func (d *DescribeClientQuotasResponse) headerVersion() int16 { return 0 } +func (d *DescribeClientQuotasResponse) isValidVersion() bool { + return d.Version == 0 +} + func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion { return V2_6_0_0 } + +func (r *DescribeClientQuotasResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_configs_request.go rename to vendor/github.com/IBM/sarama/describe_configs_request.go index 4c34880318..d0ab0d6ef7 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_request.go +++ b/vendor/github.com/IBM/sarama/describe_configs_request.go @@ -103,13 +103,19 @@ func (r *DescribeConfigsRequest) headerVersion() int16 { return 1 } +func (r *DescribeConfigsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V1_1_0_0 case 2: return V2_0_0_0 - default: + case 1: + return V1_1_0_0 + case 0: return V0_11_0_0 + default: + return V2_0_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_configs_response.go rename to vendor/github.com/IBM/sarama/describe_configs_response.go index 4968f4854a..386a56885a 100644 --- a/vendor/github.com/Shopify/sarama/describe_configs_response.go +++ b/vendor/github.com/IBM/sarama/describe_configs_response.go @@ -34,6 +34,19 @@ const ( SourceDefault ) +type DescribeConfigError struct { + Err KError + ErrMsg string +} + +func (c *DescribeConfigError) Error() string { + text := c.Err.Error() + if c.ErrMsg != "" { + text = fmt.Sprintf("%s - %s", text, c.ErrMsg) + } + return text +} + type DescribeConfigsResponse struct { Version int16 ThrottleTime time.Duration @@ -116,17 +129,27 @@ func (r *DescribeConfigsResponse) headerVersion() int16 { return 0 } +func (r *DescribeConfigsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V1_0_0_0 case 2: return V2_0_0_0 - default: + case 1: + return V1_1_0_0 + case 0: return V0_11_0_0 + default: + return V2_0_0_0 } } +func (r *DescribeConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + func (r *ResourceResponse) 
encode(pe packetEncoder, version int16) (err error) { pe.putInt16(r.ErrorCode) diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go similarity index 81% rename from vendor/github.com/Shopify/sarama/describe_groups_request.go rename to vendor/github.com/IBM/sarama/describe_groups_request.go index f81f69ac4b..c43262e86d 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/IBM/sarama/describe_groups_request.go @@ -42,12 +42,25 @@ func (r *DescribeGroupsRequest) headerVersion() int16 { return 1 } +func (r *DescribeGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 4: + return V2_4_0_0 + case 3: return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 } func (r *DescribeGroupsRequest) AddGroup(group string) { diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_groups_response.go rename to vendor/github.com/IBM/sarama/describe_groups_response.go index 09052e4310..dbc46dd089 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/IBM/sarama/describe_groups_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type DescribeGroupsResponse struct { // Version defines the protocol version to use for encode and decode Version int16 @@ -63,12 +65,29 @@ func (r *DescribeGroupsResponse) headerVersion() int16 { return 0 } +func (r *DescribeGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3, 4: + case 4: + return V2_4_0_0 + case 3: return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 +} + +func (r *DescribeGroupsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond } // GroupDescription contains each described group. 
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go similarity index 92% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_request.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_request.go index c0bf04e04e..a6613c3200 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go +++ b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go @@ -82,6 +82,13 @@ func (r *DescribeLogDirsRequest) headerVersion() int16 { return 1 } +func (r *DescribeLogDirsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + if r.Version > 0 { + return V2_0_0_0 + } return V1_0_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go similarity index 95% rename from vendor/github.com/Shopify/sarama/describe_log_dirs_response.go rename to vendor/github.com/IBM/sarama/describe_log_dirs_response.go index 411da38ad2..41b4968dab 100644 --- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go +++ b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go @@ -65,10 +65,21 @@ func (r *DescribeLogDirsResponse) headerVersion() int16 { return 0 } +func (r *DescribeLogDirsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + if r.Version > 0 { + return V2_0_0_0 + } return V1_0_0_0 } +func (r *DescribeLogDirsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + type DescribeLogDirsResponseDirMetadata struct { ErrorCode KError diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go similarity index 94% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go index b5b59404bd..a6265de5f1 100644 --- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go +++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go @@ -65,6 +65,10 @@ func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 { return 2 } +func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion { return V2_7_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go similarity index 95% rename from vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go rename to vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go index 2656c2faa1..a55c3f0ee5 100644 --- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go +++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go @@ -163,6 +163,14 @@ func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 { return 2 } +func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion { return V2_7_0_0 } + +func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} 
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml similarity index 100% rename from vendor/github.com/Shopify/sarama/dev.yml rename to vendor/github.com/IBM/sarama/dev.yml diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml similarity index 63% rename from vendor/github.com/Shopify/sarama/docker-compose.yml rename to vendor/github.com/IBM/sarama/docker-compose.yml index 5d3f038942..e916416d50 100644 --- a/vendor/github.com/Shopify/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,6 +1,7 @@ -version: '3.7' +version: '3.9' services: zookeeper-1: + hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: @@ -12,6 +13,7 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: + hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: @@ -23,6 +25,7 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: + hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' restart: always environment: @@ -34,13 +37,34 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - image: 'sarama/fv-kafka' + hostname: 'kafka-1' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-1:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' @@ -55,14 +79,37 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: - image: 'sarama/fv-kafka' + hostname: 'kafka-2' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' build: context: . 
dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-2:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' @@ -77,14 +124,37 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: - image: 'sarama/fv-kafka' + hostname: 'kafka-3' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-3:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' @@ -99,14 +169,37 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: - image: 'sarama/fv-kafka' + hostname: 'kafka-4' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' build: context: . 
dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-4:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' @@ -121,14 +214,37 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: - image: 'sarama/fv-kafka' + hostname: 'kafka-5' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' build: context: . dockerfile: Dockerfile.kafka + args: + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + SCALA_VERSION: ${SCALA_VERSION:-2.13} + healthcheck: + test: + [ + 'CMD', + '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '--bootstrap-server', + 'kafka-5:9091', + ] + interval: 15s + timeout: 15s + retries: 10 + start_period: 360s + depends_on: + - zookeeper-1 + - zookeeper-2 + - zookeeper-3 + - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.1.2} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' @@ -143,8 +259,17 @@ services: KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true' KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false' + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: + hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' + healthcheck: + test: ['CMD', '/toxiproxy-cli', 'l'] + interval: 15s + timeout: 15s + retries: 3 + start_period: 30s ports: # The tests themselves actually start the proxies on these ports - '29091:29091' @@ -152,5 +277,6 @@ services: - '29093:29093' - '29094:29094' - '29095:29095' + # This is the toxiproxy API port - '8474:8474' diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/encoder_decoder.go rename to vendor/github.com/IBM/sarama/encoder_decoder.go diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go similarity index 80% rename from vendor/github.com/Shopify/sarama/end_txn_request.go rename to vendor/github.com/IBM/sarama/end_txn_request.go index 6635425ddd..638099a5d8 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_request.go +++ b/vendor/github.com/IBM/sarama/end_txn_request.go @@ -1,6 +1,7 @@ package sarama type EndTxnRequest struct { + Version int16 TransactionalID string ProducerID int64 ProducerEpoch int16 @@ -42,13 
+43,24 @@ func (a *EndTxnRequest) key() int16 { } func (a *EndTxnRequest) version() int16 { - return 0 + return a.Version } func (r *EndTxnRequest) headerVersion() int16 { return 1 } +func (a *EndTxnRequest) isValidVersion() bool { + return a.Version >= 0 && a.Version <= 2 +} + func (a *EndTxnRequest) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch a.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go similarity index 71% rename from vendor/github.com/Shopify/sarama/end_txn_response.go rename to vendor/github.com/IBM/sarama/end_txn_response.go index dd2a045048..54597df8c7 100644 --- a/vendor/github.com/Shopify/sarama/end_txn_response.go +++ b/vendor/github.com/IBM/sarama/end_txn_response.go @@ -5,6 +5,7 @@ import ( ) type EndTxnResponse struct { + Version int16 ThrottleTime time.Duration Err KError } @@ -36,13 +37,28 @@ func (e *EndTxnResponse) key() int16 { } func (e *EndTxnResponse) version() int16 { - return 0 + return e.Version } func (r *EndTxnResponse) headerVersion() int16 { return 0 } +func (e *EndTxnResponse) isValidVersion() bool { + return e.Version >= 0 && e.Version <= 2 +} + func (e *EndTxnResponse) requiredVersion() KafkaVersion { - return V0_11_0_0 + switch e.Version { + case 2: + return V2_7_0_0 + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *EndTxnResponse) throttleTime() time.Duration { + return r.ThrottleTime } diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh new file mode 100755 index 0000000000..9fe9a44b1d --- /dev/null +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -eu +set -o pipefail + +KAFKA_VERSION="${KAFKA_VERSION:-3.6.0}" +KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" + +if [ ! -d "${KAFKA_HOME}" ]; then + echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME + exit 1 +fi + +cd "${KAFKA_HOME}" || exit 1 + +# discard all empty/commented lines from default config and copy to /tmp +sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties + +echo "########################################################################" >>/tmp/server.properties + +# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka +for var in "${!KAFKA_CFG_@}"; do + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" + sed -e '/^'$key'/d' -i"" /tmp/server.properties + value="${!var}" + echo "$key=$value" >>/tmp/server.properties +done + +sort /tmp/server.properties + +exec bin/kafka-server-start.sh /tmp/server.properties diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go similarity index 69% rename from vendor/github.com/Shopify/sarama/errors.go rename to vendor/github.com/IBM/sarama/errors.go index 27977f1662..2c431aecb0 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -79,7 +79,7 @@ var ErrTransactionNotReady = errors.New("transaction manager: transaction is not // ErrNonTransactedProducer when calling BeginTxn, CommitTxn or AbortTxn on a non transactional producer. var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer") -// ErrTransitionNotAllowed when txnmgr state transiion is not valid. 
+// ErrTransitionNotAllowed when txnmgr state transition is not valid. var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted") // ErrCannotTransitionNilError when transition is attempted with an nil error. @@ -89,7 +89,7 @@ var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transi var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response") // MultiErrorFormat specifies the formatter applied to format multierrors. The -// default implementation is a consensed version of the hashicorp/go-multierror +// default implementation is a condensed version of the hashicorp/go-multierror // default one var MultiErrorFormat multierror.ErrorFormatFunc = func(es []error) string { if len(es) == 1 { @@ -173,98 +173,98 @@ type KError int16 // Numeric error codes returned by the Kafka server. const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 - ErrTopicAlreadyExists KError = 36 - ErrInvalidPartitions KError = 37 - ErrInvalidReplicationFactor KError = 38 - ErrInvalidReplicaAssignment KError = 39 - ErrInvalidConfig KError = 40 - ErrNotController KError = 41 - ErrInvalidRequest KError = 42 - ErrUnsupportedForMessageFormat KError = 43 - ErrPolicyViolation KError = 44 - ErrOutOfOrderSequenceNumber KError = 45 - ErrDuplicateSequenceNumber KError = 46 - ErrInvalidProducerEpoch KError = 47 - ErrInvalidTxnState KError = 48 - ErrInvalidProducerIDMapping KError = 49 - ErrInvalidTransactionTimeout KError = 50 - ErrConcurrentTransactions KError = 51 - ErrTransactionCoordinatorFenced KError = 52 - ErrTransactionalIDAuthorizationFailed KError = 53 - ErrSecurityDisabled KError = 54 - ErrOperationNotAttempted KError = 55 - ErrKafkaStorageError KError = 56 - ErrLogDirNotFound KError = 57 - ErrSASLAuthenticationFailed KError = 58 - ErrUnknownProducerID KError = 59 - ErrReassignmentInProgress KError = 60 - ErrDelegationTokenAuthDisabled KError = 61 - ErrDelegationTokenNotFound KError = 62 - ErrDelegationTokenOwnerMismatch KError = 63 - ErrDelegationTokenRequestNotAllowed KError = 64 - ErrDelegationTokenAuthorizationFailed KError = 65 - ErrDelegationTokenExpired KError = 66 - ErrInvalidPrincipalType KError = 67 - ErrNonEmptyGroup 
KError = 68 - ErrGroupIDNotFound KError = 69 - ErrFetchSessionIDNotFound KError = 70 - ErrInvalidFetchSessionEpoch KError = 71 - ErrListenerNotFound KError = 72 - ErrTopicDeletionDisabled KError = 73 - ErrFencedLeaderEpoch KError = 74 - ErrUnknownLeaderEpoch KError = 75 - ErrUnsupportedCompressionType KError = 76 - ErrStaleBrokerEpoch KError = 77 - ErrOffsetNotAvailable KError = 78 - ErrMemberIdRequired KError = 79 - ErrPreferredLeaderNotAvailable KError = 80 - ErrGroupMaxSizeReached KError = 81 - ErrFencedInstancedId KError = 82 - ErrEligibleLeadersNotAvailable KError = 83 - ErrElectionNotNeeded KError = 84 - ErrNoReassignmentInProgress KError = 85 - ErrGroupSubscribedToTopic KError = 86 - ErrInvalidRecord KError = 87 - ErrUnstableOffsetCommit KError = 88 - ErrThrottlingQuotaExceeded KError = 89 - ErrProducerFenced KError = 90 + ErrUnknown KError = -1 // Errors.UNKNOWN_SERVER_ERROR + ErrNoError KError = 0 // Errors.NONE + ErrOffsetOutOfRange KError = 1 // Errors.OFFSET_OUT_OF_RANGE + ErrInvalidMessage KError = 2 // Errors.CORRUPT_MESSAGE + ErrUnknownTopicOrPartition KError = 3 // Errors.UNKNOWN_TOPIC_OR_PARTITION + ErrInvalidMessageSize KError = 4 // Errors.INVALID_FETCH_SIZE + ErrLeaderNotAvailable KError = 5 // Errors.LEADER_NOT_AVAILABLE + ErrNotLeaderForPartition KError = 6 // Errors.NOT_LEADER_OR_FOLLOWER + ErrRequestTimedOut KError = 7 // Errors.REQUEST_TIMED_OUT + ErrBrokerNotAvailable KError = 8 // Errors.BROKER_NOT_AVAILABLE + ErrReplicaNotAvailable KError = 9 // Errors.REPLICA_NOT_AVAILABLE + ErrMessageSizeTooLarge KError = 10 // Errors.MESSAGE_TOO_LARGE + ErrStaleControllerEpochCode KError = 11 // Errors.STALE_CONTROLLER_EPOCH + ErrOffsetMetadataTooLarge KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE + ErrNetworkException KError = 13 // Errors.NETWORK_EXCEPTION + ErrOffsetsLoadInProgress KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS + ErrConsumerCoordinatorNotAvailable KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE + ErrNotCoordinatorForConsumer KError = 16 // Errors.NOT_COORDINATOR + ErrInvalidTopic KError = 17 // Errors.INVALID_TOPIC_EXCEPTION + ErrMessageSetSizeTooLarge KError = 18 // Errors.RECORD_LIST_TOO_LARGE + ErrNotEnoughReplicas KError = 19 // Errors.NOT_ENOUGH_REPLICAS + ErrNotEnoughReplicasAfterAppend KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND + ErrInvalidRequiredAcks KError = 21 // Errors.INVALID_REQUIRED_ACKS + ErrIllegalGeneration KError = 22 // Errors.ILLEGAL_GENERATION + ErrInconsistentGroupProtocol KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL + ErrInvalidGroupId KError = 24 // Errors.INVALID_GROUP_ID + ErrUnknownMemberId KError = 25 // Errors.UNKNOWN_MEMBER_ID + ErrInvalidSessionTimeout KError = 26 // Errors.INVALID_SESSION_TIMEOUT + ErrRebalanceInProgress KError = 27 // Errors.REBALANCE_IN_PROGRESS + ErrInvalidCommitOffsetSize KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE + ErrTopicAuthorizationFailed KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED + ErrGroupAuthorizationFailed KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED + ErrClusterAuthorizationFailed KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED + ErrInvalidTimestamp KError = 32 // Errors.INVALID_TIMESTAMP + ErrUnsupportedSASLMechanism KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM + ErrIllegalSASLState KError = 34 // Errors.ILLEGAL_SASL_STATE + ErrUnsupportedVersion KError = 35 // Errors.UNSUPPORTED_VERSION + ErrTopicAlreadyExists KError = 36 // Errors.TOPIC_ALREADY_EXISTS + ErrInvalidPartitions KError = 37 // Errors.INVALID_PARTITIONS + ErrInvalidReplicationFactor 
KError = 38 // Errors.INVALID_REPLICATION_FACTOR + ErrInvalidReplicaAssignment KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT + ErrInvalidConfig KError = 40 // Errors.INVALID_CONFIG + ErrNotController KError = 41 // Errors.NOT_CONTROLLER + ErrInvalidRequest KError = 42 // Errors.INVALID_REQUEST + ErrUnsupportedForMessageFormat KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT + ErrPolicyViolation KError = 44 // Errors.POLICY_VIOLATION + ErrOutOfOrderSequenceNumber KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER + ErrDuplicateSequenceNumber KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER + ErrInvalidProducerEpoch KError = 47 // Errors.INVALID_PRODUCER_EPOCH + ErrInvalidTxnState KError = 48 // Errors.INVALID_TXN_STATE + ErrInvalidProducerIDMapping KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING + ErrInvalidTransactionTimeout KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT + ErrConcurrentTransactions KError = 51 // Errors.CONCURRENT_TRANSACTIONS + ErrTransactionCoordinatorFenced KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED + ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED + ErrSecurityDisabled KError = 54 // Errors.SECURITY_DISABLED + ErrOperationNotAttempted KError = 55 // Errors.OPERATION_NOT_ATTEMPTED + ErrKafkaStorageError KError = 56 // Errors.KAFKA_STORAGE_ERROR + ErrLogDirNotFound KError = 57 // Errors.LOG_DIR_NOT_FOUND + ErrSASLAuthenticationFailed KError = 58 // Errors.SASL_AUTHENTICATION_FAILED + ErrUnknownProducerID KError = 59 // Errors.UNKNOWN_PRODUCER_ID + ErrReassignmentInProgress KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS + ErrDelegationTokenAuthDisabled KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED + ErrDelegationTokenNotFound KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND + ErrDelegationTokenOwnerMismatch KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH + ErrDelegationTokenRequestNotAllowed KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED + ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED + ErrDelegationTokenExpired KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED + ErrInvalidPrincipalType KError = 67 // Errors.INVALID_PRINCIPAL_TYPE + ErrNonEmptyGroup KError = 68 // Errors.NON_EMPTY_GROUP + ErrGroupIDNotFound KError = 69 // Errors.GROUP_ID_NOT_FOUND + ErrFetchSessionIDNotFound KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND + ErrInvalidFetchSessionEpoch KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH + ErrListenerNotFound KError = 72 // Errors.LISTENER_NOT_FOUND + ErrTopicDeletionDisabled KError = 73 // Errors.TOPIC_DELETION_DISABLED + ErrFencedLeaderEpoch KError = 74 // Errors.FENCED_LEADER_EPOCH + ErrUnknownLeaderEpoch KError = 75 // Errors.UNKNOWN_LEADER_EPOCH + ErrUnsupportedCompressionType KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE + ErrStaleBrokerEpoch KError = 77 // Errors.STALE_BROKER_EPOCH + ErrOffsetNotAvailable KError = 78 // Errors.OFFSET_NOT_AVAILABLE + ErrMemberIdRequired KError = 79 // Errors.MEMBER_ID_REQUIRED + ErrPreferredLeaderNotAvailable KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE + ErrGroupMaxSizeReached KError = 81 // Errors.GROUP_MAX_SIZE_REACHED + ErrFencedInstancedId KError = 82 // Errors.FENCED_INSTANCE_ID + ErrEligibleLeadersNotAvailable KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE + ErrElectionNotNeeded KError = 84 // Errors.ELECTION_NOT_NEEDED + ErrNoReassignmentInProgress KError = 85 // Errors.NO_REASSIGNMENT_IN_PROGRESS + ErrGroupSubscribedToTopic KError = 86 // 
Errors.GROUP_SUBSCRIBED_TO_TOPIC + ErrInvalidRecord KError = 87 // Errors.INVALID_RECORD + ErrUnstableOffsetCommit KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT + ErrThrottlingQuotaExceeded KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED + ErrProducerFenced KError = 90 // Errors.PRODUCER_FENCED ) func (err KError) Error() string { @@ -302,7 +302,7 @@ func (err KError) Error() string { case ErrNetworkException: return "kafka server: The server disconnected before a response was received" case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition" + return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: return "kafka server: Offset's topic has not yet been created" case ErrNotCoordinatorForConsumer: diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/fetch_request.go rename to vendor/github.com/IBM/sarama/fetch_request.go index 63190a2b0d..a5314b55c8 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/IBM/sarama/fetch_request.go @@ -1,5 +1,7 @@ package sarama +import "fmt" + type fetchRequestBlock struct { Version int16 // currentLeaderEpoch contains the current leader epoch of the partition. @@ -241,6 +243,9 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { if err != nil { return err } + if partitionCount < 0 { + return fmt.Errorf("partitionCount %d is invalid", partitionCount) + } r.forgotten[topic] = make([]int32, partitionCount) for j := 0; j < partitionCount; j++ { @@ -275,34 +280,38 @@ func (r *FetchRequest) headerVersion() int16 { return 1 } +func (r *FetchRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 11 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return MinVersion - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4, 5: - return V0_11_0_0 - case 6: - return V1_0_0_0 - case 7: - return V1_1_0_0 - case 8: - return V2_0_0_0 - case 9, 10: - return V2_1_0_0 case 11: return V2_3_0_0 + case 9, 10: + return V2_1_0_0 + case 8: + return V2_0_0_0 + case 7: + return V1_1_0_0 + case 6: + return V1_0_0_0 + case 4, 5: + return V0_11_0_0 + case 3: + return V0_10_1_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MaxVersion + return V2_3_0_0 } } -func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*fetchRequestBlock) } @@ -320,7 +329,7 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset if r.Version >= 9 { - tmp.currentLeaderEpoch = int32(-1) + tmp.currentLeaderEpoch = leaderEpoch } r.blocks[topic][partitionID] = tmp diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go similarity index 98% rename from vendor/github.com/Shopify/sarama/fetch_response.go rename to vendor/github.com/IBM/sarama/fetch_response.go index c8ad6046ab..02e8ca4736 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ 
b/vendor/github.com/IBM/sarama/fetch_response.go @@ -8,7 +8,10 @@ import ( "github.com/rcrowley/go-metrics" ) -const invalidPreferredReplicaID = -1 +const ( + invalidLeaderEpoch = -1 + invalidPreferredReplicaID = -1 +) type AbortedTransaction struct { // ProducerID contains the producer id associated with the aborted transaction. @@ -383,33 +386,41 @@ func (r *FetchResponse) headerVersion() int16 { return 0 } +func (r *FetchResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 11 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { - case 0: - return MinVersion - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_10_1_0 - case 4, 5: - return V0_11_0_0 - case 6: - return V1_0_0_0 - case 7: - return V1_1_0_0 - case 8: - return V2_0_0_0 - case 9, 10: - return V2_1_0_0 case 11: return V2_3_0_0 + case 9, 10: + return V2_1_0_0 + case 8: + return V2_0_0_0 + case 7: + return V1_1_0_0 + case 6: + return V1_0_0_0 + case 4, 5: + return V0_11_0_0 + case 3: + return V0_10_1_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MaxVersion + return V2_3_0_0 } } +func (r *FetchResponse) throttleTime() time.Duration { + return r.ThrottleTime +} + func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { return nil diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go similarity index 90% rename from vendor/github.com/Shopify/sarama/find_coordinator_request.go rename to vendor/github.com/IBM/sarama/find_coordinator_request.go index 597bcbf786..4758835a1c 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go +++ b/vendor/github.com/IBM/sarama/find_coordinator_request.go @@ -55,8 +55,14 @@ func (r *FindCoordinatorRequest) headerVersion() int16 { return 1 } +func (f *FindCoordinatorRequest) isValidVersion() bool { + return f.Version >= 0 && f.Version <= 2 +} + func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 default: diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go similarity index 89% rename from vendor/github.com/Shopify/sarama/find_coordinator_response.go rename to vendor/github.com/IBM/sarama/find_coordinator_response.go index 83a648ad4a..11b9920d02 100644 --- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go +++ b/vendor/github.com/IBM/sarama/find_coordinator_response.go @@ -86,11 +86,21 @@ func (r *FindCoordinatorResponse) headerVersion() int16 { return 0 } +func (f *FindCoordinatorResponse) isValidVersion() bool { + return f.Version >= 0 && f.Version <= 2 +} + func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { switch f.Version { + case 2: + return V2_0_0_0 case 1: return V0_11_0_0 default: return V0_8_2_0 } } + +func (r *FindCoordinatorResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go similarity index 99% rename from vendor/github.com/Shopify/sarama/gssapi_kerberos.go rename to vendor/github.com/IBM/sarama/gssapi_kerberos.go index ab8b70196f..8abbcdc384 100644 --- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go +++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go @@ -23,6 +23,7 @@ const ( 
GSS_API_GENERIC_TAG = 0x60 KRB5_USER_AUTH = 1 KRB5_KEYTAB_AUTH = 2 + KRB5_CCACHE_AUTH = 3 GSS_API_INITIAL = 1 GSS_API_VERIFY = 2 GSS_API_FINISH = 3 @@ -31,6 +32,7 @@ const ( type GSSAPIConfig struct { AuthType int KeyTabPath string + CCachePath string KerberosConfigPath string ServiceName string Username string diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go similarity index 83% rename from vendor/github.com/Shopify/sarama/heartbeat_request.go rename to vendor/github.com/IBM/sarama/heartbeat_request.go index 511910e712..9f740f26c6 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/IBM/sarama/heartbeat_request.go @@ -60,10 +60,21 @@ func (r *HeartbeatRequest) headerVersion() int16 { return 1 } +func (r *HeartbeatRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *HeartbeatRequest) requiredVersion() KafkaVersion { - switch { - case r.Version >= 3: + switch r.Version { + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_8_2_0 + default: return V2_3_0_0 } - return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go similarity index 71% rename from vendor/github.com/Shopify/sarama/heartbeat_response.go rename to vendor/github.com/IBM/sarama/heartbeat_response.go index 95ef97f47a..a58718d7b5 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/IBM/sarama/heartbeat_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type HeartbeatResponse struct { Version int16 ThrottleTime int32 @@ -43,10 +45,25 @@ func (r *HeartbeatResponse) headerVersion() int16 { return 0 } +func (r *HeartbeatResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_8_2_0 + default: return V2_3_0_0 } - return V0_9_0_0 +} + +func (r *HeartbeatResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go similarity index 96% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_request.go index c4d05a9720..b1b490a282 100644 --- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go +++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go @@ -11,6 +11,7 @@ const ( // IncrementalAlterConfigsRequest is an incremental alter config request type type IncrementalAlterConfigsRequest struct { + Version int16 Resources []*IncrementalAlterConfigsResource ValidateOnly bool } @@ -161,13 +162,17 @@ func (a *IncrementalAlterConfigsRequest) key() int16 { } func (a *IncrementalAlterConfigsRequest) version() int16 { - return 0 + return a.Version } func (a *IncrementalAlterConfigsRequest) headerVersion() int16 { return 1 } +func (a *IncrementalAlterConfigsRequest) isValidVersion() bool { + return a.Version == 0 +} + func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion { return V2_3_0_0 } diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go 
b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go similarity index 86% rename from vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go rename to vendor/github.com/IBM/sarama/incremental_alter_configs_response.go index 3e8c4500c3..3a2df2f606 100644 --- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go +++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go @@ -4,6 +4,7 @@ import "time" // IncrementalAlterConfigsResponse is a response type for incremental alter config type IncrementalAlterConfigsResponse struct { + Version int16 ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } @@ -54,13 +55,21 @@ func (a *IncrementalAlterConfigsResponse) key() int16 { } func (a *IncrementalAlterConfigsResponse) version() int16 { - return 0 + return a.Version } func (a *IncrementalAlterConfigsResponse) headerVersion() int16 { return 0 } +func (a *IncrementalAlterConfigsResponse) isValidVersion() bool { + return a.Version == 0 +} + func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion { return V2_3_0_0 } + +func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go similarity index 91% rename from vendor/github.com/Shopify/sarama/init_producer_id_request.go rename to vendor/github.com/IBM/sarama/init_producer_id_request.go index 33ce5fa41c..dee50fb9fc 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go +++ b/vendor/github.com/IBM/sarama/init_producer_id_request.go @@ -84,19 +84,23 @@ func (i *InitProducerIDRequest) headerVersion() int16 { return 1 } +func (i *InitProducerIDRequest) isValidVersion() bool { + return i.Version >= 0 && i.Version <= 4 +} + func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { switch i.Version { - case 2: - // Added tagged fields - return V2_4_0_0 + case 4: + return V2_7_0_0 case 3: - // Added ProducerID/Epoch return V2_5_0_0 - case 0: - fallthrough + case 2: + return V2_4_0_0 case 1: - fallthrough - default: + return V2_0_0_0 + case 0: return V0_11_0_0 + default: + return V2_7_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go similarity index 85% rename from vendor/github.com/Shopify/sarama/init_producer_id_response.go rename to vendor/github.com/IBM/sarama/init_producer_id_response.go index 0060701899..256077189e 100644 --- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go +++ b/vendor/github.com/IBM/sarama/init_producer_id_response.go @@ -69,17 +69,25 @@ func (i *InitProducerIDResponse) headerVersion() int16 { return 0 } +func (i *InitProducerIDResponse) isValidVersion() bool { + return i.Version >= 0 && i.Version <= 4 +} + func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { switch i.Version { - case 2: - fallthrough + case 4: + return V2_7_0_0 case 3: + return V2_5_0_0 + case 2: return V2_4_0_0 - case 0: - fallthrough case 1: - fallthrough + return V2_0_0_0 default: return V0_11_0_0 } } + +func (r *InitProducerIDResponse) throttleTime() time.Duration { + return r.ThrottleTime +} diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go similarity index 100% rename from vendor/github.com/Shopify/sarama/interceptors.go rename to vendor/github.com/IBM/sarama/interceptors.go diff --git 
a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go similarity index 70% rename from vendor/github.com/Shopify/sarama/join_group_request.go rename to vendor/github.com/IBM/sarama/join_group_request.go index 432338cd59..3ab69c4984 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/IBM/sarama/join_group_request.go @@ -1,7 +1,9 @@ package sarama type GroupProtocol struct { - Name string + // Name contains the protocol name. + Name string + // Metadata contains the protocol metadata. Metadata []byte } @@ -25,14 +27,30 @@ func (p *GroupProtocol) encode(pe packetEncoder) (err error) { } type JoinGroupRequest struct { - Version int16 - GroupId string - SessionTimeout int32 - RebalanceTimeout int32 - MemberId string - GroupInstanceId *string - ProtocolType string - GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + // Version defines the protocol version to use for encode and decode + Version int16 + // GroupId contains the group identifier. + GroupId string + // SessionTimeout specifies that the coordinator should consider the consumer + // dead if it receives no heartbeat after this timeout in milliseconds. + SessionTimeout int32 + // RebalanceTimeout contains the maximum time in milliseconds that the + // coordinator will wait for each member to rejoin when rebalancing the + // group. + RebalanceTimeout int32 + // MemberId contains the member id assigned by the group coordinator. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. + GroupInstanceId *string + // ProtocolType contains the unique name the for class of protocols + // implemented by the group we want to join. + ProtocolType string + // GroupProtocols contains the list of protocols that the member supports. + // deprecated; use OrderedGroupProtocols + GroupProtocols map[string][]byte + // OrderedGroupProtocols contains an ordered list of protocols that the member + // supports. OrderedGroupProtocols []*GroupProtocol } @@ -150,16 +168,26 @@ func (r *JoinGroupRequest) headerVersion() int16 { return 1 } +func (r *JoinGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 5 +} + func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 4, 5: + case 5: return V2_3_0_0 - case 2, 3: + case 4: + return V2_2_0_0 + case 3: + return V2_0_0_0 + case 2: return V0_11_0_0 case 1: return V0_10_1_0 + case 0: + return V0_10_0_0 default: - return V0_9_0_0 + return V2_3_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go similarity index 68% rename from vendor/github.com/Shopify/sarama/join_group_response.go rename to vendor/github.com/IBM/sarama/join_group_response.go index d8aa1f0023..643fddc6b5 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/IBM/sarama/join_group_response.go @@ -1,20 +1,35 @@ package sarama +import "time" + type JoinGroupResponse struct { - Version int16 - ThrottleTime int32 - Err KError - GenerationId int32 + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTime contains the duration for which the request was throttled due + // to a quota violation, or zero if the request did not violate any quota. + ThrottleTime int32 + // Err contains the error code, or 0 if there was no error. 
+ Err KError + // GenerationId contains the generation ID of the group. + GenerationId int32 + // GroupProtocol contains the group protocol selected by the coordinator. GroupProtocol string - LeaderId string - MemberId string - Members []GroupMember + // LeaderId contains the leader of the group. + LeaderId string + // MemberId contains the member ID assigned by the group coordinator. + MemberId string + // Members contains the per-group-member information. + Members []GroupMember } type GroupMember struct { - MemberId string + // MemberId contains the group member ID. + MemberId string + // GroupInstanceId contains the unique identifier of the consumer instance + // provided by end user. GroupInstanceId *string - Metadata []byte + // Metadata contains the group member metadata. + Metadata []byte } func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { @@ -145,15 +160,29 @@ func (r *JoinGroupResponse) headerVersion() int16 { return 0 } +func (r *JoinGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 5 +} + func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 3, 4, 5: + case 5: return V2_3_0_0 + case 4: + return V2_2_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 case 1: return V0_10_1_0 + case 0: + return V0_10_0_0 default: - return V0_9_0_0 + return V2_3_0_0 } } + +func (r *JoinGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go similarity index 79% rename from vendor/github.com/Shopify/sarama/kerberos_client.go rename to vendor/github.com/IBM/sarama/kerberos_client.go index 01a53193bb..289126879b 100644 --- a/vendor/github.com/Shopify/sarama/kerberos_client.go +++ b/vendor/github.com/IBM/sarama/kerberos_client.go @@ -3,6 +3,7 @@ package sarama import ( krb5client "github.com/jcmturner/gokrb5/v8/client" krb5config "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/credentials" "github.com/jcmturner/gokrb5/v8/keytab" "github.com/jcmturner/gokrb5/v8/types" ) @@ -32,13 +33,23 @@ func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { var client *krb5client.Client - if config.AuthType == KRB5_KEYTAB_AUTH { + switch config.AuthType { + case KRB5_KEYTAB_AUTH: kt, err := keytab.Load(config.KeyTabPath) if err != nil { return nil, err } client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) - } else { + case KRB5_CCACHE_AUTH: + cc, err := credentials.LoadCCache(config.CCachePath) + if err != nil { + return nil, err + } + client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) + if err != nil { + return nil, err + } + default: client = krb5client.NewWithPassword(config.Username, config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST)) } diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go similarity index 88% rename from vendor/github.com/Shopify/sarama/leave_group_request.go rename to vendor/github.com/IBM/sarama/leave_group_request.go index 741b7290a8..9222e51049 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ 
b/vendor/github.com/IBM/sarama/leave_group_request.go @@ -81,10 +81,21 @@ func (r *LeaveGroupRequest) headerVersion() int16 { return 1 } +func (r *LeaveGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: - return V2_3_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go similarity index 83% rename from vendor/github.com/Shopify/sarama/leave_group_response.go rename to vendor/github.com/IBM/sarama/leave_group_response.go index 18ed357e83..f24c24867e 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/IBM/sarama/leave_group_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type MemberResponse struct { MemberId string GroupInstanceId *string @@ -83,10 +85,25 @@ func (r *LeaveGroupResponse) headerVersion() int16 { return 0 } +func (r *LeaveGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: - return V2_3_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_4_0_0 } - return V0_9_0_0 +} + +func (r *LeaveGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go similarity index 100% rename from vendor/github.com/Shopify/sarama/length_field.go rename to vendor/github.com/IBM/sarama/length_field.go diff --git a/vendor/github.com/IBM/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go new file mode 100644 index 0000000000..4d5f9e40d1 --- /dev/null +++ b/vendor/github.com/IBM/sarama/list_groups_request.go @@ -0,0 +1,82 @@ +package sarama + +type ListGroupsRequest struct { + Version int16 + StatesFilter []string // version 4 or later +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + if r.Version >= 4 { + pe.putCompactArrayLength(len(r.StatesFilter)) + for _, filter := range r.StatesFilter { + err := pe.putCompactString(filter) + if err != nil { + return err + } + } + } + if r.Version >= 3 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 4 { + filterLen, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if filterLen > 0 { + r.StatesFilter = make([]string, filterLen) + for i := 0; i < filterLen; i++ { + if r.StatesFilter[i], err = pd.getCompactString(); err != nil { + return err + } + } + } + } + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return r.Version +} + +func (r *ListGroupsRequest) headerVersion() int16 { + if r.Version >= 3 { + return 2 + } + return 1 +} + +func (r *ListGroupsRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + switch r.Version { + 
case 4: + return V2_6_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: + return V2_6_0_0 + } +} diff --git a/vendor/github.com/IBM/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go new file mode 100644 index 0000000000..62948c31fc --- /dev/null +++ b/vendor/github.com/IBM/sarama/list_groups_response.go @@ -0,0 +1,173 @@ +package sarama + +type ListGroupsResponse struct { + Version int16 + ThrottleTime int32 + Err KError + Groups map[string]string + GroupsData map[string]GroupData // version 4 or later +} + +type GroupData struct { + GroupState string // version 4 or later +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + if r.Version >= 1 { + pe.putInt32(r.ThrottleTime) + } + + pe.putInt16(int16(r.Err)) + + if r.Version <= 2 { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + } else { + pe.putCompactArrayLength(len(r.Groups)) + for groupId, protocolType := range r.Groups { + if err := pe.putCompactString(groupId); err != nil { + return err + } + if err := pe.putCompactString(protocolType); err != nil { + return err + } + + if r.Version >= 4 { + groupData := r.GroupsData[groupId] + if err := pe.putCompactString(groupData.GroupState); err != nil { + return err + } + } + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + r.Version = version + if r.Version >= 1 { + var err error + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return err + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + var n int + if r.Version <= 2 { + n, err = pd.getArrayLength() + } else { + n, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == 0 { + r.Groups = make(map[string]string) + if r.Version >= 4 { + r.GroupsData = make(map[string]GroupData) + } + } + + var groupId, protocolType string + if r.Version <= 2 { + groupId, err = pd.getString() + if err != nil { + return err + } + protocolType, err = pd.getString() + if err != nil { + return err + } + } else { + groupId, err = pd.getCompactString() + if err != nil { + return err + } + protocolType, err = pd.getCompactString() + if err != nil { + return err + } + } + + r.Groups[groupId] = protocolType + + if r.Version >= 4 { + groupState, err := pd.getCompactString() + if err != nil { + return err + } + r.GroupsData[groupId] = GroupData{ + GroupState: groupState, + } + } + + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if r.Version >= 3 { + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return r.Version +} + +func (r *ListGroupsResponse) headerVersion() int16 { + if r.Version >= 3 { + return 1 + } + return 0 +} + +func (r *ListGroupsResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 4: + return V2_6_0_0 + case 3: + return V2_4_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + 
default: + return V2_6_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_request.go index c1ffa9ba02..c7ad5e9814 100644 --- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go +++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go @@ -83,6 +83,10 @@ func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { return 2 } +func (r *ListPartitionReassignmentsRequest) isValidVersion() bool { + return r.Version == 0 +} + func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { return V2_4_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go rename to vendor/github.com/IBM/sarama/list_partition_reassignments_response.go index 4baa6a08e8..426f1c7715 100644 --- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go +++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type PartitionReplicaReassignmentsStatus struct { Replicas []int32 AddingReplicas []int32 @@ -164,6 +166,14 @@ func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { return 1 } +func (r *ListPartitionReassignmentsResponse) isValidVersion() bool { + return r.Version == 0 +} + func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { return V2_4_0_0 } + +func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/IBM/sarama/message.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message.go rename to vendor/github.com/IBM/sarama/message.go diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go similarity index 100% rename from vendor/github.com/Shopify/sarama/message_set.go rename to vendor/github.com/IBM/sarama/message_set.go diff --git a/vendor/github.com/IBM/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go new file mode 100644 index 0000000000..e76073ea0d --- /dev/null +++ b/vendor/github.com/IBM/sarama/metadata_request.go @@ -0,0 +1,240 @@ +package sarama + +import "encoding/base64" + +type Uuid [16]byte + +func (u Uuid) String() string { + return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:]) +} + +var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +type MetadataRequest struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // Topics contains the topics to fetch metadata for. + Topics []string + // AllowAutoTopicCreation contains a If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so. 
+ AllowAutoTopicCreation bool + IncludeClusterAuthorizedOperations bool // version 8 and up + IncludeTopicAuthorizedOperations bool // version 8 and up +} + +func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest { + m := &MetadataRequest{Topics: topics} + if version.IsAtLeast(V2_8_0_0) { + m.Version = 10 + } else if version.IsAtLeast(V2_4_0_0) { + m.Version = 9 + } else if version.IsAtLeast(V2_4_0_0) { + m.Version = 8 + } else if version.IsAtLeast(V2_1_0_0) { + m.Version = 7 + } else if version.IsAtLeast(V2_0_0_0) { + m.Version = 6 + } else if version.IsAtLeast(V1_0_0_0) { + m.Version = 5 + } else if version.IsAtLeast(V0_11_0_0) { + m.Version = 4 + } else if version.IsAtLeast(V0_10_1_0) { + m.Version = 2 + } else if version.IsAtLeast(V0_10_0_0) { + m.Version = 1 + } + return m +} + +func (r *MetadataRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 10 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} + } + if r.Version == 0 || len(r.Topics) > 0 { + if r.Version < 9 { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else if r.Version == 9 { + pe.putCompactArrayLength(len(r.Topics)) + for _, topicName := range r.Topics { + if err := pe.putCompactString(topicName); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + } else { // r.Version = 10 + pe.putCompactArrayLength(len(r.Topics)) + for _, topicName := range r.Topics { + if err := pe.putRawBytes(NullUUID); err != nil { + return err + } + // Avoid implicit memory aliasing in for loop + tn := topicName + if err := pe.putNullableCompactString(&tn); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + } + } + } else { + if r.Version < 9 { + pe.putInt32(-1) + } else { + pe.putCompactArrayLength(-1) + } + } + + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) + } + if r.Version > 7 { + pe.putBool(r.IncludeClusterAuthorizedOperations) + pe.putBool(r.IncludeTopicAuthorizedOperations) + } + if r.Version > 8 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version < 9 { + size, err := pd.getInt32() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + } + } else if r.Version == 9 { + size, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if size > 0 { + r.Topics = make([]string, size) + } + for i := range r.Topics { + topic, err := pd.getCompactString() + if err != nil { + return err + } + r.Topics[i] = topic + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } else { // version 10+ + size, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if size > 0 { + r.Topics = make([]string, size) + } + for i := range r.Topics { + if _, err = pd.getRawBytes(16); err != nil { // skip UUID + return err + } + topic, err := pd.getCompactNullableString() + if err != nil { + return err + } + if topic != nil { + r.Topics[i] = *topic + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if r.Version >= 4 { + if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil { + return err + } + } + + if r.Version > 7 { + 
includeClusterAuthz, err := pd.getBool() + if err != nil { + return err + } + r.IncludeClusterAuthorizedOperations = includeClusterAuthz + includeTopicAuthz, err := pd.getBool() + if err != nil { + return err + } + r.IncludeTopicAuthorizedOperations = includeTopicAuthz + } + if r.Version > 8 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return r.Version +} + +func (r *MetadataRequest) headerVersion() int16 { + if r.Version >= 9 { + return 2 + } + return 1 +} + +func (r *MetadataRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 10 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 10: + return V2_8_0_0 + case 9: + return V2_4_0_0 + case 8: + return V2_3_0_0 + case 7: + return V2_1_0_0 + case 6: + return V2_0_0_0 + case 5: + return V1_0_0_0 + case 3, 4: + return V0_11_0_0 + case 2: + return V0_10_1_0 + case 1: + return V0_10_0_0 + case 0: + return V0_8_2_0 + default: + return V2_8_0_0 + } +} diff --git a/vendor/github.com/IBM/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go new file mode 100644 index 0000000000..dfb5d3a5bd --- /dev/null +++ b/vendor/github.com/IBM/sarama/metadata_response.go @@ -0,0 +1,537 @@ +package sarama + +import "time" + +// PartitionMetadata contains each partition in the topic. +type PartitionMetadata struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the partition error, or 0 if there was no error. + Err KError + // ID contains the partition index. + ID int32 + // Leader contains the ID of the leader broker. + Leader int32 + // LeaderEpoch contains the leader epoch of this partition. + LeaderEpoch int32 + // Replicas contains the set of all nodes that host this partition. + Replicas []int32 + // Isr contains the set of nodes that are in sync with the leader for this partition. + Isr []int32 + // OfflineReplicas contains the set of offline replicas of this partition. 
+ OfflineReplicas []int32 +} + +func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { + p.Version = version + tmp, err := pd.getInt16() + if err != nil { + return err + } + p.Err = KError(tmp) + + if p.ID, err = pd.getInt32(); err != nil { + return err + } + + if p.Leader, err = pd.getInt32(); err != nil { + return err + } + + if p.Version >= 7 { + if p.LeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + + if p.Version < 9 { + p.Replicas, err = pd.getInt32Array() + } else { + p.Replicas, err = pd.getCompactInt32Array() + } + if err != nil { + return err + } + + if p.Version < 9 { + p.Isr, err = pd.getInt32Array() + } else { + p.Isr, err = pd.getCompactInt32Array() + } + if err != nil { + return err + } + + if p.Version >= 5 { + if p.Version < 9 { + p.OfflineReplicas, err = pd.getInt32Array() + } else { + p.OfflineReplicas, err = pd.getCompactInt32Array() + } + if err != nil { + return err + } + } + + if p.Version >= 9 { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + + return nil +} + +func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { + p.Version = version + pe.putInt16(int16(p.Err)) + + pe.putInt32(p.ID) + + pe.putInt32(p.Leader) + + if p.Version >= 7 { + pe.putInt32(p.LeaderEpoch) + } + + if p.Version < 9 { + err = pe.putInt32Array(p.Replicas) + } else { + err = pe.putCompactInt32Array(p.Replicas) + } + if err != nil { + return err + } + + if p.Version < 9 { + err = pe.putInt32Array(p.Isr) + } else { + err = pe.putCompactInt32Array(p.Isr) + } + if err != nil { + return err + } + + if p.Version >= 5 { + if p.Version < 9 { + err = pe.putInt32Array(p.OfflineReplicas) + } else { + err = pe.putCompactInt32Array(p.OfflineReplicas) + } + if err != nil { + return err + } + } + + if p.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +// TopicMetadata contains each topic in the response. +type TopicMetadata struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // Err contains the topic error, or 0 if there was no error. + Err KError + // Name contains the topic name. + Name string + Uuid Uuid + // IsInternal contains a True if the topic is internal. + IsInternal bool + // Partitions contains each partition in the topic. 
+ Partitions []*PartitionMetadata + TopicAuthorizedOperations int32 // Only valid for Version >= 8 +} + +func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { + t.Version = version + tmp, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(tmp) + + if t.Version < 9 { + t.Name, err = pd.getString() + } else { + t.Name, err = pd.getCompactString() + } + if err != nil { + return err + } + + if t.Version >= 10 { + uuid, err := pd.getRawBytes(16) + if err != nil { + return err + } + t.Uuid = [16]byte{} + for i := 0; i < 16; i++ { + t.Uuid[i] = uuid[i] + } + } + + if t.Version >= 1 { + t.IsInternal, err = pd.getBool() + if err != nil { + return err + } + } + + var n int + if t.Version < 9 { + n, err = pd.getArrayLength() + } else { + n, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } else { + t.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + block := &PartitionMetadata{} + if err := block.decode(pd, t.Version); err != nil { + return err + } + t.Partitions[i] = block + } + } + + if t.Version >= 8 { + t.TopicAuthorizedOperations, err = pd.getInt32() + if err != nil { + return err + } + } + + if t.Version >= 9 { + _, err = pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + + return nil +} + +func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { + t.Version = version + pe.putInt16(int16(t.Err)) + + if t.Version < 9 { + err = pe.putString(t.Name) + } else { + err = pe.putCompactString(t.Name) + } + if err != nil { + return err + } + + if t.Version >= 10 { + err = pe.putRawBytes(t.Uuid[:]) + if err != nil { + return err + } + } + + if t.Version >= 1 { + pe.putBool(t.IsInternal) + } + + if t.Version < 9 { + err = pe.putArrayLength(len(t.Partitions)) + if err != nil { + return err + } + } else { + pe.putCompactArrayLength(len(t.Partitions)) + } + for _, block := range t.Partitions { + if err := block.encode(pe, t.Version); err != nil { + return err + } + } + + if t.Version >= 8 { + pe.putInt32(t.TopicAuthorizedOperations) + } + + if t.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +type MetadataResponse struct { + // Version defines the protocol version to use for encode and decode + Version int16 + // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota. + ThrottleTimeMs int32 + // Brokers contains each broker in the response. + Brokers []*Broker + // ClusterID contains the cluster ID that responding broker belongs to. + ClusterID *string + // ControllerID contains the ID of the controller broker. + ControllerID int32 + // Topics contains each topic in the response. 
+ Topics []*TopicMetadata + ClusterAuthorizedOperations int32 // Only valid for Version >= 8 +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version >= 3 { + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + } + + var brokerArrayLen int + if r.Version < 9 { + brokerArrayLen, err = pd.getArrayLength() + } else { + brokerArrayLen, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } + + r.Brokers = make([]*Broker, brokerArrayLen) + for i := 0; i < brokerArrayLen; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd, version) + if err != nil { + return err + } + } + + if r.Version >= 2 { + if r.Version < 9 { + r.ClusterID, err = pd.getNullableString() + } else { + r.ClusterID, err = pd.getCompactNullableString() + } + if err != nil { + return err + } + } + + if r.Version >= 1 { + if r.ControllerID, err = pd.getInt32(); err != nil { + return err + } + } + + var topicArrayLen int + if version < 9 { + topicArrayLen, err = pd.getArrayLength() + } else { + topicArrayLen, err = pd.getCompactArrayLength() + } + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, topicArrayLen) + for i := 0; i < topicArrayLen; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd, version) + if err != nil { + return err + } + } + + if r.Version >= 8 { + r.ClusterAuthorizedOperations, err = pd.getInt32() + if err != nil { + return err + } + } + + if r.Version >= 9 { + _, err := pd.getEmptyTaggedFieldArray() + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + + if r.Version < 9 { + err = pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + } else { + pe.putCompactArrayLength(len(r.Brokers)) + } + + for _, broker := range r.Brokers { + err = broker.encode(pe, r.Version) + if err != nil { + return err + } + } + + if r.Version >= 2 { + if r.Version < 9 { + err = pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } else { + err = pe.putNullableCompactString(r.ClusterID) + if err != nil { + return err + } + } + } + + if r.Version >= 1 { + pe.putInt32(r.ControllerID) + } + + if r.Version < 9 { + err = pe.putArrayLength(len(r.Topics)) + } else { + pe.putCompactArrayLength(len(r.Topics)) + } + if err != nil { + return err + } + for _, block := range r.Topics { + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + + if r.Version >= 8 { + pe.putInt32(r.ClusterAuthorizedOperations) + } + + if r.Version >= 9 { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return r.Version +} + +func (r *MetadataResponse) headerVersion() int16 { + if r.Version < 9 { + return 0 + } else { + return 1 + } +} + +func (r *MetadataResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 10: + return V2_8_0_0 + case 9: + return V2_4_0_0 + case 8: + return V2_3_0_0 + case 7: + return V2_1_0_0 + case 6: + return V2_0_0_0 + case 5: + return V1_0_0_0 + case 3, 4: + return V0_11_0_0 + case 2: + return V0_10_1_0 + case 1: + return V0_10_0_0 + case 0: + return V0_8_2_0 + default: + return V2_8_0_0 + } +} + +func (r *MetadataResponse) throttleTime() time.Duration { + return 
time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + +// testing API + +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + pmatch.Leader = brokerID + pmatch.Replicas = replicas + if pmatch.Replicas == nil { + pmatch.Replicas = []int32{} + } + pmatch.Isr = isr + if pmatch.Isr == nil { + pmatch.Isr = []int32{} + } + pmatch.OfflineReplicas = offline + if pmatch.OfflineReplicas == nil { + pmatch.OfflineReplicas = []int32{} + } + pmatch.Err = err +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go similarity index 97% rename from vendor/github.com/Shopify/sarama/metrics.go rename to vendor/github.com/IBM/sarama/metrics.go index 7b7705f2e3..de8ad95c74 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/IBM/sarama/metrics.go @@ -32,7 +32,7 @@ func getMetricNameForBroker(name string, broker *Broker) string { func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. KAFKA-1902 and KAFKA-2337 - return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)) + return fmt.Sprintf(name+"-for-topic-%s", strings.ReplaceAll(topic, ".", "_")) } func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go similarity index 87% rename from vendor/github.com/Shopify/sarama/mockbroker.go rename to vendor/github.com/IBM/sarama/mockbroker.go index 628c3cb90c..2c5e7cadde 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/IBM/sarama/mockbroker.go @@ -10,6 +10,7 @@ import ( "reflect" "strconv" "sync" + "syscall" "time" "github.com/davecgh/go-spew/spew" @@ -98,6 +99,20 @@ func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { }) } +// SetHandlerFuncByMap defines mapping of Request types to RequestHandlerFunc. When a +// request is received by the broker, it looks up the request type in the map +// and invoke the found RequestHandlerFunc instance to generate an appropriate reply. 
+func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) { + fnMap := make(map[string]requestHandlerFunc) + for k, v := range handlerMap { + fnMap[k] = v + } + b.setHandler(func(req *request) (res encoderWithHeader) { + reqTypeName := reflect.TypeOf(req.body).Elem().Name() + return fnMap[reqTypeName](req) + }) +} + // SetNotifier set a function that will get invoked whenever a request has been // processed successfully and will provide the number of bytes read and written func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { @@ -178,7 +193,9 @@ func (b *MockBroker) serverLoop() { i++ } wg.Wait() - Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) + } } func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { @@ -243,8 +260,10 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W for { buffer, err := b.readToBytes(conn) if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) - b.serverError(err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) + b.serverError(err) + } break } @@ -253,8 +272,10 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W req, br, err := decodeRequest(bytes.NewReader(buffer)) bytesRead = br if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) - b.serverError(err) + if !isConnectionClosedError(err) { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + } break } @@ -280,7 +301,7 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W encodedRes, err := encode(res, nil) if err != nil { - b.serverError(err) + b.serverError(fmt.Errorf("failed to encode %T - %w", res, err)) break } if len(encodedRes) == 0 { @@ -358,21 +379,25 @@ func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) } } -func (b *MockBroker) serverError(err error) { - isConnectionClosedError := false +func isConnectionClosedError(err error) bool { + var result bool opError := &net.OpError{} if errors.As(err, &opError) { - isConnectionClosedError = true + result = true } else if errors.Is(err, io.EOF) { - isConnectionClosedError = true + result = true } else if err.Error() == "use of closed network connection" { - isConnectionClosedError = true + result = true } - if isConnectionClosedError { + return result +} + +func (b *MockBroker) serverError(err error) { + b.t.Helper() + if isConnectionClosedError(err) { return } - b.t.Errorf(err.Error()) } @@ -386,10 +411,29 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. 
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { - listener, err := net.Listen("tcp", addr) + var ( + listener net.Listener + err error + ) + + // retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown) + for i := 0; i < 20; i++ { + listener, err = net.Listen("tcp", addr) + if err != nil { + if errors.Is(err, syscall.EADDRINUSE) { + Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr) + time.Sleep(time.Millisecond * 100) + continue + } + t.Fatal(err) + } + break + } + if err != nil { t.Fatal(err) } + return NewMockBrokerListener(t, brokerID, listener) } diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go similarity index 100% rename from vendor/github.com/Shopify/sarama/mockkerberos.go rename to vendor/github.com/IBM/sarama/mockkerberos.go diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go similarity index 90% rename from vendor/github.com/Shopify/sarama/mockresponses.go rename to vendor/github.com/IBM/sarama/mockresponses.go index 15b4367f99..d09415b49a 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/IBM/sarama/mockresponses.go @@ -13,6 +13,7 @@ type TestReporter interface { Errorf(string, ...interface{}) Fatal(...interface{}) Fatalf(string, ...interface{}) + Helper() } // MockResponse is a response builder interface it defines one method that @@ -82,9 +83,9 @@ func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*ListGroupsRequest) - _ = request response := &ListGroupsResponse{ - Groups: m.groups, + Version: request.Version, + Groups: m.groups, } return response } @@ -114,7 +115,7 @@ func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, descrip func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*DescribeGroupsRequest) - response := &DescribeGroupsResponse{} + response := &DescribeGroupsResponse{Version: request.version()} for _, requestedGroup := range request.Groups { if group, ok := m.groups[requestedGroup]; ok { response.Groups = append(response.Groups, group) @@ -134,6 +135,7 @@ func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHe // MockMetadataResponse is a `MetadataResponse` builder. 
type MockMetadataResponse struct { controllerID int32 + errors map[string]KError leaders map[string]map[int32]int32 brokers map[string]int32 t TestReporter @@ -141,12 +143,18 @@ type MockMetadataResponse struct { func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { return &MockMetadataResponse{ + errors: make(map[string]KError), leaders: make(map[string]map[int32]int32), brokers: make(map[string]int32), t: t, } } +func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse { + mmr.errors[topic] = kerror + return mmr +} + func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { partitions := mmr.leaders[topic] if partitions == nil { @@ -190,10 +198,22 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } + for topic, err := range mmr.errors { + metadataResponse.AddTopic(topic, err) + } return metadataResponse } for _, topic := range metadataRequest.Topics { - for partition, brokerID := range mmr.leaders[topic] { + leaders, ok := mmr.leaders[topic] + if !ok { + if err, ok := mmr.errors[topic]; ok { + metadataResponse.AddTopic(topic, err) + } else { + metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition) + } + continue + } + for partition, brokerID := range leaders { metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } @@ -233,7 +253,7 @@ func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetResponse := &OffsetResponse{Version: offsetRequest.Version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { - offset := mor.getOffset(topic, partition, block.time) + offset := mor.getOffset(topic, partition, block.timestamp) offsetResponse.AddTopicPartition(topic, partition, offset) } } @@ -410,7 +430,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup - res := &ConsumerMetadataResponse{} + res := &ConsumerMetadataResponse{Version: req.version()} v := mr.coordinators[group] switch v := v.(type) { case *MockBroker: @@ -458,8 +478,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*FindCoordinatorRequest) - res := &FindCoordinatorResponse{} - res.Version = req.Version + res := &FindCoordinatorResponse{Version: req.version()} var v interface{} switch req.CoordinatorType { case CoordinatorGroup: @@ -507,7 +526,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup - res := &OffsetCommitResponse{} + res := &OffsetCommitResponse{Version: req.version()} for topic, partitions := range req.blocks { for partition := range partitions { res.AddError(topic, partition, mr.getError(group, topic, partition)) @@ -564,7 +583,10 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res 
:= &ProduceResponse{ - Version: mr.version, + Version: req.version(), + } + if mr.version > 0 { + res.Version = mr.version } for topic, partitions := range req.records { for partition := range partitions { @@ -667,7 +689,8 @@ func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHea } type MockDeleteTopicsResponse struct { - t TestReporter + t TestReporter + error KError } func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { @@ -676,16 +699,21 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) - res := &DeleteTopicsResponse{} + res := &DeleteTopicsResponse{Version: req.version()} res.TopicErrorCodes = make(map[string]KError) for _, topic := range req.Topics { - res.TopicErrorCodes[topic] = ErrNoError + res.TopicErrorCodes[topic] = mr.error } res.Version = req.Version return res } +func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse { + mr.error = kerror + return mr +} + type MockCreatePartitionsResponse struct { t TestReporter } @@ -696,7 +724,7 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) - res := &CreatePartitionsResponse{} + res := &CreatePartitionsResponse{Version: req.version()} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) for topic := range req.TopicPartitions { @@ -724,7 +752,7 @@ func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartit func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterPartitionReassignmentsRequest) _ = req - res := &AlterPartitionReassignmentsResponse{} + res := &AlterPartitionReassignmentsResponse{Version: req.version()} return res } @@ -739,7 +767,7 @@ func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitio func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ListPartitionReassignmentsRequest) _ = req - res := &ListPartitionReassignmentsResponse{} + res := &ListPartitionReassignmentsResponse{Version: req.version()} for topic, partitions := range req.blocks { for _, partition := range partitions { @@ -760,7 +788,7 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) - res := &DeleteRecordsResponse{} + res := &DeleteRecordsResponse{Version: req.version()} res.Topics = make(map[string]*DeleteRecordsResponseTopic) for topic, deleteRecordRequestTopic := range req.Topics { @@ -906,7 +934,7 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) - res := &AlterConfigsResponse{} + res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -928,7 +956,7 @@ func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsR func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) - res 
:= &AlterConfigsResponse{} + res := &AlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -951,7 +979,7 @@ func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlte func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) - res := &IncrementalAlterConfigsResponse{} + res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -973,7 +1001,7 @@ func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIn func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*IncrementalAlterConfigsRequest) - res := &IncrementalAlterConfigsResponse{} + res := &IncrementalAlterConfigsResponse{Version: req.version()} for _, r := range req.Resources { res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ @@ -996,7 +1024,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) - res := &CreateAclsResponse{} + res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) @@ -1014,7 +1042,7 @@ func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseE func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) - res := &CreateAclsResponse{} + res := &CreateAclsResponse{Version: req.version()} for range req.AclCreations { res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest}) @@ -1032,7 +1060,7 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) - res := &DescribeAclsResponse{} + res := &DescribeAclsResponse{Version: req.version()} res.Err = ErrNoError acl := &ResourceAcls{} if req.ResourceName != nil { @@ -1075,11 +1103,12 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*SaslAuthenticateRequest) - res := &SaslAuthenticateResponse{} - res.Version = req.Version - res.Err = msar.kerror - res.SaslAuthBytes = msar.saslAuthBytes - res.SessionLifetimeMs = msar.sessionLifetimeMs + res := &SaslAuthenticateResponse{ + Version: req.version(), + Err: msar.kerror, + SaslAuthBytes: msar.saslAuthBytes, + SessionLifetimeMs: msar.sessionLifetimeMs, + } return res } @@ -1113,7 +1142,8 @@ func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { } func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { - res := &SaslHandshakeResponse{} + req := reqBody.(*SaslHandshakeRequest) + res := &SaslHandshakeResponse{Version: req.version()} res.Err = mshr.kerror res.EnabledMechanisms = mshr.enabledMechanisms return res @@ -1135,7 +1165,7 @@ func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := 
reqBody.(*DeleteAclsRequest) - res := &DeleteAclsResponse{} + res := &DeleteAclsResponse{Version: req.version()} for range req.Filters { response := &FilterResponse{Err: ErrNoError} @@ -1160,7 +1190,9 @@ func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDelete } func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteGroupsRequest) resp := &DeleteGroupsResponse{ + Version: req.version(), GroupErrorCodes: map[string]KError{}, } for _, group := range m.deletedGroups { @@ -1189,7 +1221,9 @@ func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic stri } func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteOffsetsRequest) resp := &DeleteOffsetsResponse{ + Version: req.version(), ErrorCode: m.errorCode, Errors: map[string]map[int32]KError{ m.topic: {m.partition: m.errorPartition}, @@ -1282,8 +1316,10 @@ func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse { } func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*LeaveGroupRequest) resp := &LeaveGroupResponse{ - Err: m.Err, + Version: req.version(), + Err: m.Err, } return resp } @@ -1305,7 +1341,9 @@ func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse { } func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*SyncGroupRequest) resp := &SyncGroupResponse{ + Version: req.version(), Err: m.Err, MemberAssignment: m.MemberAssignment, } @@ -1337,7 +1375,10 @@ func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse { } func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader { - resp := &HeartbeatResponse{} + req := reqBody.(*HeartbeatRequest) + resp := &HeartbeatResponse{ + Version: req.version(), + } return resp } @@ -1382,7 +1423,9 @@ func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartiti } func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeLogDirsRequest) resp := &DescribeLogDirsResponse{ + Version: req.version(), LogDirs: m.logDirs, } return resp @@ -1424,3 +1467,43 @@ func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeade } return res } + +// MockInitProducerIDResponse is an `InitPorducerIDResponse` builder. 
+type MockInitProducerIDResponse struct { + producerID int64 + producerEpoch int16 + err KError + t TestReporter +} + +func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse { + return &MockInitProducerIDResponse{ + t: t, + } +} + +func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse { + m.producerID = int64(id) + return m +} + +func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse { + m.producerEpoch = int16(epoch) + return m +} + +func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse { + m.err = err + return m +} + +func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*InitProducerIDRequest) + res := &InitProducerIDResponse{ + Version: req.Version, + Err: m.err, + ProducerID: m.producerID, + ProducerEpoch: m.producerEpoch, + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/offset_commit_request.go rename to vendor/github.com/IBM/sarama/offset_commit_request.go index 5dd88220d9..45d1977d41 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/IBM/sarama/offset_commit_request.go @@ -201,26 +201,34 @@ func (r *OffsetCommitRequest) headerVersion() int16 { return 1 } +func (r *OffsetCommitRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_9_0_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5, 6: - return V2_1_0_0 case 7: return V2_3_0_0 + case 5, 6: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_9_0_0 + case 0, 1: + return V0_8_2_0 default: - return MinVersion + return V2_4_0_0 } } -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata) +} + +func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) } diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go similarity index 87% rename from vendor/github.com/Shopify/sarama/offset_commit_response.go rename to vendor/github.com/IBM/sarama/offset_commit_response.go index 4bed269aa5..523508fa48 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/IBM/sarama/offset_commit_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type OffsetCommitResponse struct { Version int16 ThrottleTimeMs int32 @@ -98,19 +100,29 @@ func (r *OffsetCommitResponse) headerVersion() int16 { return 0 } +func (r *OffsetCommitResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_9_0_0 - case 3: - return V0_11_0_0 + case 7: + return 
V2_3_0_0 + case 5, 6: + return V2_1_0_0 case 4: return V2_0_0_0 - case 5, 6, 7: - return V2_3_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_9_0_0 + case 0, 1: + return V0_8_2_0 default: - return MinVersion + return V2_4_0_0 } } + +func (r *OffsetCommitResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go similarity index 73% rename from vendor/github.com/Shopify/sarama/offset_fetch_request.go rename to vendor/github.com/IBM/sarama/offset_fetch_request.go index 7e147eb60c..0c9b8405bd 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/IBM/sarama/offset_fetch_request.go @@ -7,6 +7,43 @@ type OffsetFetchRequest struct { partitions map[string][]int32 } +func NewOffsetFetchRequest( + version KafkaVersion, + group string, + partitions map[string][]int32, +) *OffsetFetchRequest { + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: partitions, + } + if version.IsAtLeast(V2_5_0_0) { + // Version 7 is adding the require stable flag. + request.Version = 7 + } else if version.IsAtLeast(V2_4_0_0) { + // Version 6 is the first flexible version. + request.Version = 6 + } else if version.IsAtLeast(V2_1_0_0) { + // Version 3, 4, and 5 are the same as version 2. + request.Version = 5 + } else if version.IsAtLeast(V2_0_0_0) { + request.Version = 4 + } else if version.IsAtLeast(V0_11_0_0) { + request.Version = 3 + } else if version.IsAtLeast(V0_10_2_0) { + // Starting in version 2, the request can contain a null topics array to indicate that offsets + // for all topics should be fetched. It also returns a top level error code + // for group or coordinator level errors. + request.Version = 2 + } else if version.IsAtLeast(V0_8_2_0) { + // In version 0, the request read offsets from ZK. + // + // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic. 
+ request.Version = 1 + } + + return request +} + func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { if r.Version < 0 || r.Version > 7 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} @@ -171,24 +208,30 @@ func (r *OffsetFetchRequest) headerVersion() int16 { return 1 } +func (r *OffsetFetchRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_10_2_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5: - return V2_1_0_0 - case 6: - return V2_4_0_0 case 7: return V2_5_0_0 + case 6: + return V2_4_0_0 + case 5: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_2_0 + case 1: + return V0_8_2_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_5_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go similarity index 94% rename from vendor/github.com/Shopify/sarama/offset_fetch_response.go rename to vendor/github.com/IBM/sarama/offset_fetch_response.go index 19449220f2..7ce7927d8d 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/IBM/sarama/offset_fetch_response.go @@ -1,5 +1,7 @@ package sarama +import "time" + type OffsetFetchResponseBlock struct { Offset int64 LeaderEpoch int32 @@ -20,6 +22,8 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err if err != nil { return err } + } else { + b.LeaderEpoch = -1 } if isFlexible { @@ -234,27 +238,37 @@ func (r *OffsetFetchResponse) headerVersion() int16 { return 0 } +func (r *OffsetFetchResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_8_2_0 - case 2: - return V0_10_2_0 - case 3: - return V0_11_0_0 - case 4: - return V2_0_0_0 - case 5: - return V2_1_0_0 - case 6: - return V2_4_0_0 case 7: return V2_5_0_0 + case 6: + return V2_4_0_0 + case 5: + return V2_1_0_0 + case 4: + return V2_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_2_0 + case 1: + return V0_8_2_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_5_0_0 } } +func (r *OffsetFetchResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { if r.Blocks == nil { return nil diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go similarity index 88% rename from vendor/github.com/Shopify/sarama/offset_manager.go rename to vendor/github.com/IBM/sarama/offset_manager.go index 1ea15ff939..1bf5459089 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -153,11 +153,8 @@ func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retri return om.fetchInitialOffset(topic, partition, retries-1) } - req := new(OffsetFetchRequest) - req.Version = 1 - req.ConsumerGroup = om.group - req.AddPartition(topic, partition) - + partitions := map[string][]int32{topic: {partition}} + req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions) resp, err := broker.FetchOffset(req) if err != nil { if retries <= 0 { @@ -277,23 
+274,53 @@ func (om *offsetManager) flushToBroker() { } func (om *offsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if om.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: om.group, - ConsumerID: om.memberID, - ConsumerGroupGeneration: om.generation, - } - } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: om.group, - ConsumerID: om.memberID, - ConsumerGroupGeneration: om.generation, + r := &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + // Version 1 adds timestamp and group membership information, as well as the commit timestamp. + // + // Version 2 adds retention time. It removes the commit timestamp added in version 1. + if om.conf.Version.IsAtLeast(V0_9_0_0) { + r.Version = 2 + } + // Version 3 and 4 are the same as version 2. + if om.conf.Version.IsAtLeast(V0_11_0_0) { + r.Version = 3 + } + if om.conf.Version.IsAtLeast(V2_0_0_0) { + r.Version = 4 + } + // Version 5 removes the retention time, which is now controlled only by a broker configuration. + // + // Version 6 adds the leader epoch for fencing. + if om.conf.Version.IsAtLeast(V2_1_0_0) { + r.Version = 6 + } + // version 7 adds a new field called groupInstanceId to indicate member identity across restarts. + if om.conf.Version.IsAtLeast(V2_3_0_0) { + r.Version = 7 + r.GroupInstanceId = om.groupInstanceId + } + + // commit timestamp was only briefly supported in V1 where we set it to + // ReceiveTime (-1) to tell the broker to set it to the time when the commit + // request was received + var commitTimestamp int64 + if r.Version == 1 { + commitTimestamp = ReceiveTime + } + + // request controlled retention was only supported from V2-V4 (it became + // broker-only after that) so if the user has set the config options then + // flow those through as retention time on the commit request. + if r.Version >= 2 && r.Version < 5 { + // Map Sarama's default of 0 to Kafka's default of -1 + r.RetentionTime = -1 + if om.conf.Consumer.Offsets.Retention > 0 { + r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond) } } @@ -304,17 +331,12 @@ func (om *offsetManager) constructRequest() *OffsetCommitRequest { for _, pom := range topicManagers { pom.lock.Lock() if pom.dirty { - r.AddBlock(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, perPartitionTimestamp, pom.metadata) + r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata) } pom.lock.Unlock() } } - if om.groupInstanceId != nil { - r.Version = 7 - r.GroupInstanceId = om.groupInstanceId - } - if len(r.blocks) > 0 { return r } @@ -359,13 +381,13 @@ func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest // nothing wrong but we didn't commit, we'll get it next time round case ErrFencedInstancedId: pom.handleError(err) - // TODO close the whole consumer for instacne fenced.... + // TODO close the whole consumer for instance fenced.... 
om.tryCancelSession() case ErrUnknownTopicOrPartition: // let the user know *and* try redispatching - if topic-auto-create is // enabled, redispatching should trigger a metadata req and create the // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706) fallthrough default: // dunno, tell the user and try redispatching diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go similarity index 76% rename from vendor/github.com/Shopify/sarama/offset_request.go rename to vendor/github.com/IBM/sarama/offset_request.go index 4c9ce4df55..13de0a89f1 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/IBM/sarama/offset_request.go @@ -1,28 +1,46 @@ package sarama type offsetRequestBlock struct { - time int64 - maxOffsets int32 // Only used in version 0 + // currentLeaderEpoch contains the current leader epoch (used in version 4+). + currentLeaderEpoch int32 + // timestamp contains the current timestamp. + timestamp int64 + // maxNumOffsets contains the maximum number of offsets to report. + maxNumOffsets int32 // Only used in version 0 } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(b.time) + if version >= 4 { + pe.putInt32(b.currentLeaderEpoch) + } + + pe.putInt64(b.timestamp) + if version == 0 { - pe.putInt32(b.maxOffsets) + pe.putInt32(b.maxNumOffsets) } return nil } func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if b.time, err = pd.getInt64(); err != nil { + b.currentLeaderEpoch = -1 + if version >= 4 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } + + if b.timestamp, err = pd.getInt64(); err != nil { return err } + if version == 0 { - if b.maxOffsets, err = pd.getInt32(); err != nil { + if b.maxNumOffsets, err = pd.getInt32(); err != nil { return err } } + return nil } @@ -137,14 +155,24 @@ func (r *OffsetRequest) headerVersion() int16 { return 1 } +func (r *OffsetRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_10_1_0 + case 4: + return V2_1_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 + case 1: + return V0_10_1_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_0_0_0 } } @@ -160,7 +188,7 @@ func (r *OffsetRequest) ReplicaID() int32 { return -1 } -func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) { if r.blocks == nil { r.blocks = make(map[string]map[int32]*offsetRequestBlock) } @@ -170,9 +198,10 @@ func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, ma } tmp := new(offsetRequestBlock) - tmp.time = time + tmp.currentLeaderEpoch = -1 + tmp.timestamp = timestamp if r.Version == 0 { - tmp.maxOffsets = maxOffsets + tmp.maxNumOffsets = maxOffsets } r.blocks[topic][partitionID] = tmp diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go similarity index 73% rename from vendor/github.com/Shopify/sarama/offset_response.go rename to vendor/github.com/IBM/sarama/offset_response.go index ffe84664c5..6c62e07913 100644 --- 
a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/IBM/sarama/offset_response.go @@ -1,10 +1,17 @@ package sarama +import "time" + type OffsetResponseBlock struct { - Err KError - Offsets []int64 // Version 0 - Offset int64 // Version 1 - Timestamp int64 // Version 1 + Err KError + // Offsets contains the result offsets (for V0/V1 compatibility) + Offsets []int64 // Version 0 + // Timestamp contains the timestamp associated with the returned offset. + Timestamp int64 // Version 1 + // Offset contains the returned offset. + Offset int64 // Version 1 + // LeaderEpoch contains the current leader epoch of the partition. + LeaderEpoch int32 } func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -16,22 +23,29 @@ func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error if version == 0 { b.Offsets, err = pd.getInt64Array() - return err } - b.Timestamp, err = pd.getInt64() - if err != nil { - return err - } + if version >= 1 { + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } - b.Offset, err = pd.getInt64() - if err != nil { - return err + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} } - // For backwards compatibility put the offset in the offsets array too - b.Offsets = []int64{b.Offset} + if version >= 4 { + if b.LeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } return nil } @@ -43,8 +57,14 @@ func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error return pe.putInt64Array(b.Offsets) } - pe.putInt64(b.Timestamp) - pe.putInt64(b.Offset) + if version >= 1 { + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + } + + if version >= 4 { + pe.putInt32(b.LeaderEpoch) + } return nil } @@ -165,17 +185,31 @@ func (r *OffsetResponse) headerVersion() int16 { return 0 } +func (r *OffsetResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 4 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_10_1_0 + case 4: + return V2_1_0_0 + case 3: + return V2_0_0_0 case 2: return V0_11_0_0 + case 1: + return V0_10_1_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_0_0_0 } } +func (r *OffsetResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} + // testing API func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go similarity index 98% rename from vendor/github.com/Shopify/sarama/packet_decoder.go rename to vendor/github.com/IBM/sarama/packet_decoder.go index b8cae5350a..526e0f42fe 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/IBM/sarama/packet_decoder.go @@ -55,7 +55,7 @@ type pushDecoder interface { // Saves the offset into the input buffer as the location to actually read the calculated value when able. saveOffset(in int) - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32). reserveLength() int // Indicates that all required data is now available to calculate and check the field. 
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/packet_encoder.go rename to vendor/github.com/IBM/sarama/packet_encoder.go diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go similarity index 86% rename from vendor/github.com/Shopify/sarama/partitioner.go rename to vendor/github.com/IBM/sarama/partitioner.go index 57377760a7..50a345a3eb 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/IBM/sarama/partitioner.go @@ -2,6 +2,7 @@ package sarama import ( "hash" + "hash/crc32" "hash/fnv" "math/rand" "time" @@ -53,6 +54,15 @@ func WithAbsFirst() HashPartitionerOption { } } +// WithHashUnsigned means the partitioner treats the hashed value as unsigned when +// partitioning. This is intended to be combined with the crc32 hash algorithm to +// be compatible with librdkafka's implementation +func WithHashUnsigned() HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.hashUnsigned = true + } +} + // WithCustomHashFunction lets you specify what hash function to use for the partitioning func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { return func(hp *hashPartitioner) { @@ -126,6 +136,7 @@ type hashPartitioner struct { random Partitioner hasher hash.Hash32 referenceAbs bool + hashUnsigned bool } // NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. @@ -137,6 +148,7 @@ func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor p.random = NewRandomPartitioner(topic) p.hasher = hasher() p.referenceAbs = false + p.hashUnsigned = false return p } } @@ -148,6 +160,7 @@ func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstruct p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false + p.hashUnsigned = false for _, option := range options { option(p) } @@ -164,6 +177,7 @@ func NewHashPartitioner(topic string) Partitioner { p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = false + p.hashUnsigned = false return p } @@ -176,6 +190,19 @@ func NewReferenceHashPartitioner(topic string) Partitioner { p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() p.referenceAbs = true + p.hashUnsigned = false + return p +} + +// NewConsistentCRCHashPartitioner is like NewHashPartitioner execpt that it uses the *unsigned* crc32 hash +// of the encoded bytes of the message key modulus the number of partitions. This is compatible with +// librdkafka's `consistent_random` partitioner +func NewConsistentCRCHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = crc32.NewIEEE() + p.referenceAbs = false + p.hashUnsigned = true return p } @@ -199,6 +226,10 @@ func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int3 // but not past Sarama versions if p.referenceAbs { partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else if p.hashUnsigned { + // librdkafka treats the hashed value as unsigned. 
If `hashUnsigned` is set we are compatible + // with librdkafka's `consistent` partitioning but not past Sarama versions + partition = int32(p.hasher.Sum32() % uint32(numPartitions)) } else { partition = int32(p.hasher.Sum32()) % numPartitions if partition < 0 { diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/prep_encoder.go rename to vendor/github.com/IBM/sarama/prep_encoder.go diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go similarity index 95% rename from vendor/github.com/Shopify/sarama/produce_request.go rename to vendor/github.com/IBM/sarama/produce_request.go index 0034651e25..cbe58dd827 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/IBM/sarama/produce_request.go @@ -29,7 +29,8 @@ type ProduceRequest struct { } func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { + topicCompressionRatioMetric metrics.Histogram, +) int64 { var topicRecordCount int64 for _, messageBlock := range msgSet.Messages { // Is this a fake "message" wrapping real messages? @@ -53,7 +54,8 @@ func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Hist } func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, - topicCompressionRatioMetric metrics.Histogram) int64 { + topicCompressionRatioMetric metrics.Histogram, +) int64 { if recordBatch.compressedRecords != nil { compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) compressionRatioMetric.Update(compressionRatio) @@ -210,18 +212,28 @@ func (r *ProduceRequest) headerVersion() int16 { return 1 } +func (r *ProduceRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - case 3: - return V0_11_0_0 case 7: return V2_1_0_0 + case 6: + return V2_0_0_0 + case 4, 5: + return V1_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 default: - return MinVersion + return V2_1_0_0 } } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go similarity index 91% rename from vendor/github.com/Shopify/sarama/produce_response.go rename to vendor/github.com/IBM/sarama/produce_response.go index edf978790c..de53e06a0c 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/IBM/sarama/produce_response.go @@ -175,8 +175,33 @@ func (r *ProduceResponse) headerVersion() int16 { return 0 } +func (r *ProduceResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 7 +} + func (r *ProduceResponse) requiredVersion() KafkaVersion { - return MinVersion + switch r.Version { + case 7: + return V2_1_0_0 + case 6: + return V2_0_0_0 + case 4, 5: + return V1_0_0_0 + case 3: + return V0_11_0_0 + case 2: + return V0_10_0_0 + case 1: + return V0_9_0_0 + case 0: + return V0_8_2_0 + default: + return V2_1_0_0 + } +} + +func (r *ProduceResponse) throttleTime() time.Duration { + return r.ThrottleTime } func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { diff --git 
a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go similarity index 97% rename from vendor/github.com/Shopify/sarama/produce_set.go rename to vendor/github.com/IBM/sarama/produce_set.go index 8d6980479e..004fc64903 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/IBM/sarama/produce_set.go @@ -141,8 +141,13 @@ func (ps *produceSet) buildRequest() *ProduceRequest { req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID } } - - if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } + if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) { + req.Version = 6 + } + if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { req.Version = 7 } diff --git a/vendor/github.com/Shopify/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go similarity index 100% rename from vendor/github.com/Shopify/sarama/quota_types.go rename to vendor/github.com/IBM/sarama/quota_types.go diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_decoder.go rename to vendor/github.com/IBM/sarama/real_decoder.go diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go similarity index 100% rename from vendor/github.com/Shopify/sarama/real_encoder.go rename to vendor/github.com/IBM/sarama/real_encoder.go diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go similarity index 100% rename from vendor/github.com/Shopify/sarama/record.go rename to vendor/github.com/IBM/sarama/record.go diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go similarity index 95% rename from vendor/github.com/Shopify/sarama/record_batch.go rename to vendor/github.com/IBM/sarama/record_batch.go index d382ca4887..c422c5c2f2 100644 --- a/vendor/github.com/Shopify/sarama/record_batch.go +++ b/vendor/github.com/IBM/sarama/record_batch.go @@ -20,12 +20,12 @@ func (e recordsArray) encode(pe packetEncoder) error { } func (e recordsArray) decode(pd packetDecoder) error { + records := make([]Record, len(e)) for i := range e { - rec := &Record{} - if err := rec.decode(pd); err != nil { + if err := records[i].decode(pd); err != nil { return err } - e[i] = rec + e[i] = &records[i] } return nil } @@ -58,7 +58,7 @@ func (b *RecordBatch) LastOffset() int64 { func (b *RecordBatch) encode(pe packetEncoder) error { if b.Version != 2 { - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)} } pe.putInt64(b.FirstOffset) pe.push(&lengthField{}) diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/IBM/sarama/records.go similarity index 100% rename from vendor/github.com/Shopify/sarama/records.go rename to vendor/github.com/IBM/sarama/records.go diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/IBM/sarama/request.go similarity index 51% rename from vendor/github.com/Shopify/sarama/request.go rename to vendor/github.com/IBM/sarama/request.go index 5fec9776f2..e8e74ca34a 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/IBM/sarama/request.go @@ -12,6 +12,7 @@ type protocolBody interface { key() 
int16 version() int16 headerVersion() int16 + isValidVersion() bool requiredVersion() KafkaVersion } @@ -119,85 +120,114 @@ func decodeRequest(r io.Reader) (*request, int, error) { func allocateBody(key, version int16) protocolBody { switch key { case 0: - return &ProduceRequest{} + return &ProduceRequest{Version: version} case 1: return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: - return &MetadataRequest{} + return &MetadataRequest{Version: version} + // 4: LeaderAndIsrRequest + // 5: StopReplicaRequest + // 6: UpdateMetadataRequest + // 7: ControlledShutdownRequest case 8: return &OffsetCommitRequest{Version: version} case 9: return &OffsetFetchRequest{Version: version} case 10: - return &FindCoordinatorRequest{} + return &FindCoordinatorRequest{Version: version} case 11: - return &JoinGroupRequest{} + return &JoinGroupRequest{Version: version} case 12: - return &HeartbeatRequest{} + return &HeartbeatRequest{Version: version} case 13: - return &LeaveGroupRequest{} + return &LeaveGroupRequest{Version: version} case 14: - return &SyncGroupRequest{} + return &SyncGroupRequest{Version: version} case 15: - return &DescribeGroupsRequest{} + return &DescribeGroupsRequest{Version: version} case 16: - return &ListGroupsRequest{} + return &ListGroupsRequest{Version: version} case 17: - return &SaslHandshakeRequest{} + return &SaslHandshakeRequest{Version: version} case 18: return &ApiVersionsRequest{Version: version} case 19: - return &CreateTopicsRequest{} + return &CreateTopicsRequest{Version: version} case 20: - return &DeleteTopicsRequest{} + return &DeleteTopicsRequest{Version: version} case 21: - return &DeleteRecordsRequest{} + return &DeleteRecordsRequest{Version: version} case 22: return &InitProducerIDRequest{Version: version} + // 23: OffsetForLeaderEpochRequest case 24: - return &AddPartitionsToTxnRequest{} + return &AddPartitionsToTxnRequest{Version: version} case 25: - return &AddOffsetsToTxnRequest{} + return &AddOffsetsToTxnRequest{Version: version} case 26: - return &EndTxnRequest{} + return &EndTxnRequest{Version: version} + // 27: WriteTxnMarkersRequest case 28: - return &TxnOffsetCommitRequest{} + return &TxnOffsetCommitRequest{Version: version} case 29: - return &DescribeAclsRequest{} + return &DescribeAclsRequest{Version: int(version)} case 30: - return &CreateAclsRequest{} + return &CreateAclsRequest{Version: version} case 31: - return &DeleteAclsRequest{} + return &DeleteAclsRequest{Version: int(version)} case 32: - return &DescribeConfigsRequest{} + return &DescribeConfigsRequest{Version: version} case 33: - return &AlterConfigsRequest{} + return &AlterConfigsRequest{Version: version} + // 34: AlterReplicaLogDirsRequest case 35: - return &DescribeLogDirsRequest{} + return &DescribeLogDirsRequest{Version: version} case 36: - return &SaslAuthenticateRequest{} + return &SaslAuthenticateRequest{Version: version} case 37: - return &CreatePartitionsRequest{} + return &CreatePartitionsRequest{Version: version} + // 38: CreateDelegationTokenRequest + // 39: RenewDelegationTokenRequest + // 40: ExpireDelegationTokenRequest + // 41: DescribeDelegationTokenRequest case 42: - return &DeleteGroupsRequest{} + return &DeleteGroupsRequest{Version: version} + // 43: ElectLeadersRequest case 44: - return &IncrementalAlterConfigsRequest{} + return &IncrementalAlterConfigsRequest{Version: version} case 45: - return &AlterPartitionReassignmentsRequest{} + return &AlterPartitionReassignmentsRequest{Version: version} case 46: - return 
&ListPartitionReassignmentsRequest{} + return &ListPartitionReassignmentsRequest{Version: version} case 47: - return &DeleteOffsetsRequest{} + return &DeleteOffsetsRequest{Version: version} case 48: - return &DescribeClientQuotasRequest{} + return &DescribeClientQuotasRequest{Version: version} case 49: - return &AlterClientQuotasRequest{} + return &AlterClientQuotasRequest{Version: version} case 50: - return &DescribeUserScramCredentialsRequest{} + return &DescribeUserScramCredentialsRequest{Version: version} case 51: - return &AlterUserScramCredentialsRequest{} + return &AlterUserScramCredentialsRequest{Version: version} + // 52: VoteRequest + // 53: BeginQuorumEpochRequest + // 54: EndQuorumEpochRequest + // 55: DescribeQuorumRequest + // 56: AlterPartitionRequest + // 57: UpdateFeaturesRequest + // 58: EnvelopeRequest + // 59: FetchSnapshotRequest + // 60: DescribeClusterRequest + // 61: DescribeProducersRequest + // 62: BrokerRegistrationRequest + // 63: BrokerHeartbeatRequest + // 64: UnregisterBrokerRequest + // 65: DescribeTransactionsRequest + // 66: ListTransactionsRequest + // 67: AllocateProducerIdsRequest + // 68: ConsumerGroupHeartbeatRequest } return nil } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go similarity index 100% rename from vendor/github.com/Shopify/sarama/response_header.go rename to vendor/github.com/IBM/sarama/response_header.go diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go similarity index 67% rename from vendor/github.com/Shopify/sarama/sarama.go rename to vendor/github.com/IBM/sarama/sarama.go index cb773f171f..4d5f60a666 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/IBM/sarama/sarama.go @@ -22,28 +22,32 @@ Metrics are exposed through https://github.com/rcrowley/go-metrics library in a Broker related metrics: - +----------------------------------------------+------------+---------------------------------------------------------------+ - | Name | Type | Description | - +----------------------------------------------+------------+---------------------------------------------------------------+ - | incoming-byte-rate | meter | Bytes/second read off all brokers | - | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | - | outgoing-byte-rate | meter | Bytes/second written off all brokers | - | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | - | request-rate | meter | Requests/second sent to all brokers | - | request-rate-for-broker- | meter | Requests/second sent to a given broker | - | request-size | histogram | Distribution of the request size in bytes for all brokers | - | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | - | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | - | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | - | response-rate | meter | Responses/second received from all brokers | - | response-rate-for-broker- | meter | Responses/second received from a given broker | - | response-size | histogram | Distribution of the response size in bytes for all brokers | - | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | - | requests-in-flight | counter | The current number of in-flight requests awaiting a response | - | | | for 
all brokers | - | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | - | | | for a given broker | - +----------------------------------------------+------------+---------------------------------------------------------------+ + +---------------------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +---------------------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | + | protocol-requests-rate- | meter | Number of api requests sent to the brokers for all brokers | + | | | https://kafka.apache.org/protocol.html#protocol_api_keys | | + | protocol-requests-rate--for-broker- | meter | Number of packets sent to the brokers by api-key for a given | + | | | broker | + +---------------------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. @@ -87,7 +91,7 @@ import ( var ( // Logger is the instance of a StdLogger interface that Sarama writes connection - // management events to. By default it is set to discard all log messages via ioutil.Discard, + // management events to. By default it is set to discard all log messages via io.Discard, // but you can set it to redirect wherever you want. 
Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags) diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go similarity index 89% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_request.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_request.go index 5bb0988ea5..3a562a53b8 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go +++ b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go @@ -31,6 +31,10 @@ func (r *SaslAuthenticateRequest) headerVersion() int16 { return 1 } +func (r *SaslAuthenticateRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go similarity index 92% rename from vendor/github.com/Shopify/sarama/sasl_authenticate_response.go rename to vendor/github.com/IBM/sarama/sasl_authenticate_response.go index 37c8e45dae..ae52cde1c5 100644 --- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go +++ b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go @@ -59,6 +59,10 @@ func (r *SaslAuthenticateResponse) headerVersion() int16 { return 0 } +func (r *SaslAuthenticateResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go similarity index 78% rename from vendor/github.com/Shopify/sarama/sasl_handshake_request.go rename to vendor/github.com/IBM/sarama/sasl_handshake_request.go index 74dc3072f4..410a5b0eaa 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/IBM/sarama/sasl_handshake_request.go @@ -33,6 +33,15 @@ func (r *SaslHandshakeRequest) headerVersion() int16 { return 1 } +func (r *SaslHandshakeRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 1: + return V1_0_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go similarity index 77% rename from vendor/github.com/Shopify/sarama/sasl_handshake_response.go rename to vendor/github.com/IBM/sarama/sasl_handshake_response.go index 69dfc3178e..502732cbd3 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ b/vendor/github.com/IBM/sarama/sasl_handshake_response.go @@ -1,6 +1,7 @@ package sarama type SaslHandshakeResponse struct { + Version int16 Err KError EnabledMechanisms []string } @@ -30,13 +31,22 @@ func (r *SaslHandshakeResponse) key() int16 { } func (r *SaslHandshakeResponse) version() int16 { - return 0 + return r.Version } func (r *SaslHandshakeResponse) headerVersion() int16 { return 0 } +func (r *SaslHandshakeResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 1 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { - return V0_10_0_0 + switch r.Version { + case 1: + return V1_0_0_0 + default: + return V0_10_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go 
b/vendor/github.com/IBM/sarama/scram_formatter.go similarity index 100% rename from vendor/github.com/Shopify/sarama/scram_formatter.go rename to vendor/github.com/IBM/sarama/scram_formatter.go diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go similarity index 100% rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go similarity index 93% rename from vendor/github.com/Shopify/sarama/sync_group_request.go rename to vendor/github.com/IBM/sarama/sync_group_request.go index 33ed3baccb..95efc28580 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/IBM/sarama/sync_group_request.go @@ -123,12 +123,23 @@ func (r *SyncGroupRequest) headerVersion() int16 { return 1 } +func (r *SyncGroupRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { - switch { - case r.Version >= 3: + switch r.Version { + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: return V2_3_0_0 } - return V0_9_0_0 } func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go similarity index 77% rename from vendor/github.com/Shopify/sarama/sync_group_response.go rename to vendor/github.com/IBM/sarama/sync_group_response.go index 41b63b3d03..f7da15b4f1 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/IBM/sarama/sync_group_response.go @@ -1,9 +1,11 @@ package sarama +import "time" + type SyncGroupResponse struct { // Version defines the protocol version to use for encode and decode Version int16 - // ThrottleTimeMs contains the duration in milliseconds for which the + // ThrottleTime contains the duration in milliseconds for which the // request was throttled due to a quota violation, or zero if the request // did not violate any quota. ThrottleTime int32 @@ -57,10 +59,25 @@ func (r *SyncGroupResponse) headerVersion() int16 { return 0 } +func (r *SyncGroupResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 3 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { switch r.Version { - case 1, 2, 3: + case 3: + return V2_3_0_0 + case 2: + return V2_0_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_9_0_0 + default: return V2_3_0_0 } - return V0_9_0_0 +} + +func (r *SyncGroupResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTime) * time.Millisecond } diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go similarity index 98% rename from vendor/github.com/Shopify/sarama/sync_producer.go rename to vendor/github.com/IBM/sarama/sync_producer.go index 8765ac3368..3119baa6d7 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/IBM/sarama/sync_producer.go @@ -33,7 +33,7 @@ type SyncProducer interface { // TxnStatus return current producer transaction status. TxnStatus() ProducerTxnStatusFlag - // IsTransactional return true when current producer is is transactional. 
+ // IsTransactional return true when current producer is transactional. IsTransactional() bool // BeginTxn mark current transaction as ready. diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go similarity index 100% rename from vendor/github.com/Shopify/sarama/timestamp.go rename to vendor/github.com/IBM/sarama/timestamp.go diff --git a/vendor/github.com/Shopify/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go similarity index 91% rename from vendor/github.com/Shopify/sarama/transaction_manager.go rename to vendor/github.com/IBM/sarama/transaction_manager.go index e18abecd38..ca7e13dab0 100644 --- a/vendor/github.com/Shopify/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -14,7 +14,7 @@ type ProducerTxnStatusFlag int16 const ( // ProducerTxnFlagUninitialized when txnmgr is created ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota - // ProducerTxnFlagInitializing when txnmgr is initilizing + // ProducerTxnFlagInitializing when txnmgr is initializing ProducerTxnFlagInitializing // ProducerTxnFlagReady when is ready to receive transaction ProducerTxnFlagReady @@ -22,7 +22,7 @@ const ( ProducerTxnFlagInTransaction // ProducerTxnFlagEndTransaction when transaction will be committed ProducerTxnFlagEndTransaction - // ProducerTxnFlagInError whan having abortable or fatal error + // ProducerTxnFlagInError when having abortable or fatal error ProducerTxnFlagInError // ProducerTxnFlagCommittingTransaction when committing txn ProducerTxnFlagCommittingTransaction @@ -117,13 +117,13 @@ var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{ ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we need are initilizing + // When we need are initializing ProducerTxnFlagInitializing: { ProducerTxnFlagInitializing, ProducerTxnFlagReady, ProducerTxnFlagInError, }, - // When we have initilized transactional producer + // When we have initialized transactional producer ProducerTxnFlagReady: { ProducerTxnFlagInTransaction, }, @@ -161,8 +161,10 @@ type topicPartition struct { } // to ensure that we don't do a full scan every time a partition or an offset is added. -type topicPartitionSet map[topicPartition]struct{} -type topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata +type ( + topicPartitionSet map[topicPartition]struct{} + topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata +) func (s topicPartitionSet) mapToRequest() map[string][]int32 { result := make(map[string][]int32, len(s)) @@ -315,12 +317,20 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, if err != nil { return true, err } - response, err := coordinator.AddOffsetsToTxn(&AddOffsetsToTxnRequest{ + request := &AddOffsetsToTxnRequest{ TransactionalID: t.transactionalID, ProducerEpoch: t.producerEpoch, ProducerID: t.producerID, GroupID: groupId, - }) + } + if t.client.Config().Version.IsAtLeast(V2_7_0_0) { + // Version 2 adds the support for new error code PRODUCER_FENCED. + request.Version = 2 + } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) { + // Version 1 is the same as version 0. + request.Version = 1 + } + response, err := coordinator.AddOffsetsToTxn(request) if err != nil { // If an error occurred try to refresh current transaction coordinator. 
 			_ = coordinator.Close()
@@ -390,13 +400,21 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets,
 		if err != nil {
 			return resultOffsets, true, err
 		}
-		responses, err := consumerGroupCoordinator.TxnOffsetCommit(&TxnOffsetCommitRequest{
+		request := &TxnOffsetCommitRequest{
 			TransactionalID: t.transactionalID,
 			ProducerEpoch:   t.producerEpoch,
 			ProducerID:      t.producerID,
 			GroupID:         groupId,
 			Topics:          offsets.mapToRequest(),
-		})
+		}
+		if t.client.Config().Version.IsAtLeast(V2_1_0_0) {
+			// Version 2 adds the committed leader epoch.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		responses, err := consumerGroupCoordinator.TxnOffsetCommit(request)
 		if err != nil {
 			_ = consumerGroupCoordinator.Close()
 			_ = t.client.RefreshCoordinator(groupId)
@@ -466,13 +484,24 @@ func (t *transactionManager) initProducerId() (int64, int16, error) {
 	}
 
 	if t.client.Config().Version.IsAtLeast(V2_5_0_0) {
-		req.Version = 3
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 4 adds the support for new error code PRODUCER_FENCED.
+			req.Version = 4
+		} else {
+			// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try
+			// to resume after an INVALID_PRODUCER_EPOCH error
+			req.Version = 3
+		}
 		isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch
 		t.coordinatorSupportsBumpingEpoch = true
 		req.ProducerID = t.producerID
 		req.ProducerEpoch = t.producerEpoch
 	} else if t.client.Config().Version.IsAtLeast(V2_4_0_0) {
+		// Version 2 is the first flexible version.
 		req.Version = 2
+	} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+		// Version 1 is the same as version 0.
+		req.Version = 1
 	}
 
 	if isEpochBump {
@@ -540,9 +569,8 @@ func (t *transactionManager) initProducerId() (int64, int16, error) {
 			return response.ProducerID, response.ProducerEpoch, false, nil
 		}
 		switch response.Err {
-		case ErrConsumerCoordinatorNotAvailable:
-			fallthrough
-		case ErrNotCoordinatorForConsumer:
+		// Retriable errors
+		case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress:
 			if t.isTransactional() {
 				_ = coordinator.Close()
 				_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
@@ -610,12 +638,20 @@ func (t *transactionManager) endTxn(commit bool) error {
 		if err != nil {
 			return true, err
 		}
-		response, err := coordinator.EndTxn(&EndTxnRequest{
+		request := &EndTxnRequest{
 			TransactionalID:   t.transactionalID,
 			ProducerEpoch:     t.producerEpoch,
 			ProducerID:        t.producerID,
 			TransactionResult: commit,
-		})
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		response, err := coordinator.EndTxn(request)
 		if err != nil {
 			// Always retry on network error
 			_ = coordinator.Close()
@@ -660,7 +696,7 @@ func (t *transactionManager) finishTransaction(commit bool) error {
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
 
-	// Ensure no error when committing or abording
+	// Ensure no error when committing or aborting
 	if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
 		return t.lastError
 	} else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
@@ -779,13 +815,20 @@ func (t *transactionManager) publishTxnPartitions() error {
 		if err != nil {
 			return true, err
 		}
-		addPartResponse, err := coordinator.AddPartitionsToTxn(&AddPartitionsToTxnRequest{
+		request := &AddPartitionsToTxnRequest{
 			TransactionalID: t.transactionalID,
 			ProducerID:      t.producerID,
 			ProducerEpoch:   t.producerEpoch,
 			TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(),
-		})
-
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		addPartResponse, err := coordinator.AddPartitionsToTxn(request)
 		if err != nil {
 			_ = coordinator.Close()
 			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
similarity index 73%
rename from vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
rename to vendor/github.com/IBM/sarama/txn_offset_commit_request.go
index c4043a3352..ca13afb3b2 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
@@ -1,6 +1,7 @@
 package sarama
 
 type TxnOffsetCommitRequest struct {
+	Version         int16
 	TransactionalID string
 	GroupID         string
 	ProducerID      int64
@@ -29,7 +30,7 @@ func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
 			return err
 		}
 		for _, partition := range partitions {
-			if err := partition.encode(pe); err != nil {
+			if err := partition.encode(pe, t.Version); err != nil {
 				return err
 			}
 		}
@@ -39,6 +40,7 @@ func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
 }
 
 func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
 	if t.TransactionalID, err = pd.getString(); err != nil {
 		return err
 	}
@@ -88,26 +90,49 @@ func (a *TxnOffsetCommitRequest) key() int16 {
 }
 
 func (a *TxnOffsetCommitRequest) version() int16 {
-	return 0
+	return a.Version
 }
 
 func (a *TxnOffsetCommitRequest) headerVersion() int16 {
 	return 1
 }
 
+func (a *TxnOffsetCommitRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
 func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
 }
 
 type PartitionOffsetMetadata struct {
+	// Partition contains the index of the partition within the topic.
 	Partition int32
-	Offset    int64
-	Metadata  *string
+	// Offset contains the message offset to be committed.
+	Offset int64
+	// LeaderEpoch contains the leader epoch of the last consumed record.
+	LeaderEpoch int32
+	// Metadata contains any associated metadata the client wants to keep.
+	Metadata *string
 }
 
-func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error {
 	pe.putInt32(p.Partition)
 	pe.putInt64(p.Offset)
+
+	if version >= 2 {
+		pe.putInt32(p.LeaderEpoch)
+	}
+
 	if err := pe.putNullableString(p.Metadata); err != nil {
 		return err
 	}
@@ -122,6 +147,13 @@ func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err e
 	if p.Offset, err = pd.getInt64(); err != nil {
 		return err
 	}
+
+	if version >= 2 {
+		if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
 	if p.Metadata, err = pd.getNullableString(); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
similarity index 80%
rename from vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
rename to vendor/github.com/IBM/sarama/txn_offset_commit_response.go
index 94d8029dac..d5144faf77 100644
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
@@ -5,6 +5,7 @@ import (
 )
 
 type TxnOffsetCommitResponse struct {
+	Version      int16
 	ThrottleTime time.Duration
 	Topics       map[string][]*PartitionError
 }
@@ -33,6 +34,7 @@ func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
 }
 
 func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
 	throttleTime, err := pd.getInt32()
 	if err != nil {
 		return err
 	}
@@ -75,13 +77,30 @@ func (a *TxnOffsetCommitResponse) key() int16 {
 }
 
 func (a *TxnOffsetCommitResponse) version() int16 {
-	return 0
+	return a.Version
 }
 
 func (a *TxnOffsetCommitResponse) headerVersion() int16 {
 	return 0
 }
 
+func (a *TxnOffsetCommitResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
 func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *TxnOffsetCommitResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
 }
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go
similarity index 84%
rename from vendor/github.com/Shopify/sarama/utils.go
rename to vendor/github.com/IBM/sarama/utils.go
index 74c3089200..feadc0065b 100644
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ b/vendor/github.com/IBM/sarama/utils.go
@@ -191,6 +191,14 @@ var (
 	V3_2_1_0 = newKafkaVersion(3, 2, 1, 0)
 	V3_2_2_0 = newKafkaVersion(3, 2, 2, 0)
 	V3_2_3_0 = newKafkaVersion(3, 2, 3, 0)
+	V3_3_0_0 = newKafkaVersion(3, 3, 0, 0)
+	V3_3_1_0 = newKafkaVersion(3, 3, 1, 0)
+	V3_3_2_0 = newKafkaVersion(3, 3, 2, 0)
+	V3_4_0_0 = newKafkaVersion(3, 4, 0, 0)
+	V3_4_1_0 = newKafkaVersion(3, 4, 1, 0)
+	V3_5_0_0 = newKafkaVersion(3, 5, 0, 0)
+	V3_5_1_0 = newKafkaVersion(3, 5, 1, 0)
+	V3_6_0_0 = newKafkaVersion(3, 6, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -244,32 +252,44 @@ var (
 		V3_2_1_0,
 		V3_2_2_0,
 		V3_2_3_0,
+		V3_3_0_0,
+		V3_3_1_0,
+		V3_3_2_0,
+		V3_4_0_0,
+		V3_4_1_0,
+		V3_5_0_0,
+		V3_5_1_0,
+		V3_6_0_0,
 	}
 	MinVersion     = V0_8_2_0
-	MaxVersion     = V3_2_3_0
-	DefaultVersion = V1_0_0_0
+	MaxVersion     = V3_6_0_0
+	DefaultVersion = V2_1_0_0
 
-	// reduced set of versions to matrix test
+	// reduced set of protocol versions to matrix test
 	fvtRangeVersions = []KafkaVersion{
 		V0_8_2_2,
 		V0_10_2_2,
 		V1_0_2_0,
 		V1_1_1_0,
 		V2_0_1_0,
-		V2_1_1_0,
 		V2_2_2_0,
-		V2_3_1_0,
 		V2_4_1_0,
-		V2_5_1_0,
 		V2_6_2_0,
-		V2_7_1_0,
 		V2_8_2_0,
-		V3_0_2_0,
 		V3_1_2_0,
-		V3_2_3_0,
+		V3_3_2_0,
+		V3_6_0_0,
 	}
 )
 
+var (
+	// This regex validates that a string complies with the pre kafka 1.0.0 format for version strings, for example 0.11.0.3
+	validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`)
+
+	// This regex validates that a string complies with the post Kafka 1.0.0 format, for example 1.0.0
+	validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
+)
+
 // ParseKafkaVersion parses and returns kafka version or error from a string
 func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	if len(s) < 5 {
@@ -278,9 +298,9 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	var major, minor, veryMinor, patch uint
 	var err error
 	if s[0] == '0' {
-		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+		err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
 	} else {
-		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+		err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
 	}
 	if err != nil {
 		return DefaultVersion, err
@@ -288,8 +308,8 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) {
 	return newKafkaVersion(major, minor, veryMinor, patch), nil
 }
 
-func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
-	if !regexp.MustCompile(pattern).MatchString(s) {
+func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
+	if !pattern.MatchString(s) {
 		return fmt.Errorf("invalid version `%s`", s)
 	}
 	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
diff --git a/vendor/github.com/Shopify/sarama/version.go b/vendor/github.com/IBM/sarama/version.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/version.go
rename to vendor/github.com/IBM/sarama/version.go
diff --git a/vendor/github.com/IBM/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go
new file mode 100644
index 0000000000..6073ce7c44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/zstd.go
@@ -0,0 +1,74 @@
+package sarama
+
+import (
+	"sync"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// zstdMaxBufferedEncoders maximum number of not-in-use zstd encoders
+// If the pool of encoders is exhausted then new encoders will be created on the fly
+const zstdMaxBufferedEncoders = 1
+
+type ZstdEncoderParams struct {
+	Level int
+}
+type ZstdDecoderParams struct {
+}
+
+var zstdDecMap sync.Map
+
+var zstdAvailableEncoders sync.Map
+
+func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
+	if c, ok := zstdAvailableEncoders.Load(params); ok {
+		return c.(chan *zstd.Encoder)
+	}
+	c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
+	return c.(chan *zstd.Encoder)
+}
+
+func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
+	select {
+	case enc := <-getZstdEncoderChannel(params):
+		return enc
+	default:
+		encoderLevel := zstd.SpeedDefault
+		if params.Level != CompressionLevelDefault {
+			encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
+		}
+		zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
+			zstd.WithEncoderLevel(encoderLevel),
+			zstd.WithEncoderConcurrency(1))
+		return zstdEnc
+	}
+}
+
+func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
+	select {
+	case getZstdEncoderChannel(params) <- enc:
+	default:
+	}
+}
+
+func getDecoder(params
ZstdDecoderParams) *zstd.Decoder { + if ret, ok := zstdDecMap.Load(params); ok { + return ret.(*zstd.Decoder) + } + // It's possible to race and create multiple new readers. + // Only one will survive GC after use. + zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + zstdDecMap.Store(params, zstdDec) + return zstdDec +} + +func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) { + return getDecoder(params).DecodeAll(src, dst) +} + +func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) { + enc := getZstdEncoder(params) + out := enc.EncodeAll(src, dst) + releaseEncoder(params, enc) + return out, nil +} diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index c2f92ec9a1..0000000000 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,1187 +0,0 @@ -# Changelog - -## Version 1.31.1 (2022-02-01) - -- #2126 - @bai - Populate missing kafka versions -- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image -- #2123 - @bai - Update klauspost/compress to 0.14 -- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy -- #2119 - @bai - Add Kafka 3.1.0 version number -- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption -- #2051 - @seveas - Expose the TLS connection state of a broker connection -- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys -- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup -- #2113 - @mosceo - Fix typo - -## Version 1.31.0 (2022-01-18) - -## What's Changed -### :tada: New Features / Improvements -* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/Shopify/sarama/pull/2088 -* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/Shopify/sarama/pull/1686 -* Support request pipelining in AsyncProducer by @slaunay in https://github.com/Shopify/sarama/pull/2094 -### :bug: Fixes -* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/Shopify/sarama/pull/2080 -* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/Shopify/sarama/pull/2081 -* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/Shopify/sarama/pull/2082 -* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/Shopify/sarama/pull/2096 -* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/Shopify/sarama/pull/2107 -* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/Shopify/sarama/pull/2108 -* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/Shopify/sarama/pull/2078 -* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/Shopify/sarama/pull/2111 -### :wrench: Maintenance -* chore: bump runtime and test dependencies by @dnwe in https://github.com/Shopify/sarama/pull/2100 -### :memo: Documentation -* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/Shopify/sarama/pull/2099 -### :heavy_plus_sign: Other Changes -* Fix typo by @mosceo in https://github.com/Shopify/sarama/pull/2084 - -## New Contributors -* @grongor made their first contribution in https://github.com/Shopify/sarama/pull/2080 -* @fengyinqiao made their first 
contribution in https://github.com/Shopify/sarama/pull/2088 -* @xujianhai666 made their first contribution in https://github.com/Shopify/sarama/pull/1686 -* @mosceo made their first contribution in https://github.com/Shopify/sarama/pull/2084 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.1...v1.31.0 - -## Version 1.30.1 (2021-12-04) - -## What's Changed -### :tada: New Features / Improvements -* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/Shopify/sarama/pull/2045 -### :bug: Fixes -* fix: set min-go-version to 1.16 by @troyanov in https://github.com/Shopify/sarama/pull/2048 -* logger: fix debug logs' formatting directives by @utrack in https://github.com/Shopify/sarama/pull/2054 -* fix: stuck on the batch with zero records length by @pachmu in https://github.com/Shopify/sarama/pull/2057 -* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/Shopify/sarama/pull/2076 -### :wrench: Maintenance -* chore: add release notes configuration by @dnwe in https://github.com/Shopify/sarama/pull/2046 -* chore: confluent platform version bump by @lizthegrey in https://github.com/Shopify/sarama/pull/2070 - -## Notes -* ℹ️ from Sarama 1.30.x onward the minimum version of Go toolchain required is 1.16.x - -## New Contributors -* @troyanov made their first contribution in https://github.com/Shopify/sarama/pull/2048 -* @lizthegrey made their first contribution in https://github.com/Shopify/sarama/pull/2045 -* @utrack made their first contribution in https://github.com/Shopify/sarama/pull/2054 -* @pachmu made their first contribution in https://github.com/Shopify/sarama/pull/2057 - -**Full Changelog**: https://github.com/Shopify/sarama/compare/v1.30.0...v1.30.1 - -## Version 1.30.0 (2021-09-29) - -⚠️ This release has been superseded by v1.30.1 and should _not_ be used. 
- -**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076 - ---- - -ℹ️ **Note: from Sarama 1.30.0 the minimum version of Go toolchain required is 1.16.x** - ---- - -# New Features / Improvements - -- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh -- #2000 - @matzew - Using xdg-go module for SCRAM -- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures -- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM -- #2006 - @faillefer - Add support for DeleteOffsets operation -- #1909 - @agriffaut - KIP-546 Client quota APIs -- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state -- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger -- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log -- #2019 - @dnwe - feat: add logging & a metric for producer throttle -- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface -- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol -- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open -- #2034 - @bai - Add support for kafka 3.0.0 - -# Fixes - -- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest -- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation -- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls -- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true -- #2007 - @bai - Add support for Go 1.17 -- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks -- #2010 - @dnwe - chore: enable exportloopref and misspell linters -- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements -- #2015 - @bai - Change default branch to main -- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID() -- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0 -- #2016 - @dnwe - chore: replace deprecated Go calls -- #2017 - @dnwe - chore: delete legacy vagrant script -- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test -- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5 -- #2033 - @bai - Update dependencies -- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method -- #2035 - @dnwe - chore: populate the missing kafka versions -- #2038 - @dnwe - feat: add a fuzzing workflow to github actions - -## New Contributors -* @zifengyu made their first contribution in https://github.com/Shopify/sarama/pull/1983 -* @doxsch made their first contribution in https://github.com/Shopify/sarama/pull/1990 -* @LubergAlexander made their first contribution in https://github.com/Shopify/sarama/pull/1988 -* @HurSungYun made their first contribution in https://github.com/Shopify/sarama/pull/2001 -* @gdm85 made their first contribution in https://github.com/Shopify/sarama/pull/2003 -* @qiangmzsx made their first contribution in https://github.com/Shopify/sarama/pull/1973 -* @zhaomoran made their first contribution in https://github.com/Shopify/sarama/pull/1992 -* @faillefer made their first contribution in https://github.com/Shopify/sarama/pull/2006 -* @crivera-fastly made their first contribution in https://github.com/Shopify/sarama/pull/1718 -* @null-sleep made their first contribution in https://github.com/Shopify/sarama/pull/1984 - -**Full Changelog**: 
https://github.com/Shopify/sarama/compare/v1.29.1...v1.30.0 - -## Version 1.29.1 (2021-06-24) - -# New Features / Improvements - -- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API -- #1964 - @ajanikow - Add DelegationToken ResourceType - -# Fixes - -- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire -- #1971 - @KerryJava - fix kafka-producer-performance throughput panic -- #1968 - @dnwe - chore: bump golang.org/x versions -- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers -- #1963 - @dnwe - fix: ensure backoff timer is re-used -- #1949 - @dnwe - fix: explicitly use uint64 for payload length - -## Version 1.29.0 (2021-05-07) - -### New Features / Improvements - -- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API -- #1869 - @wyndhblb - zstd: encode+decode performance improvements -- #1541 - @izolight - add String, (Un)MarshalText for acl types. -- #1921 - @bai - Add support for Kafka 2.8.0 - -### Fixes -- #1936 - @dnwe - fix(consumer): follow preferred broker -- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response) -- #1926 - @dnwe - fix: correct initial CodeQL findings -- #1925 - @bai - Test out CodeQL -- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos -- #1922 - @bai - Update go dependencies -- #1898 - @mmaslankaprv - Parsing only known control batches value -- #1887 - @withshubh - Fix: issues affecting code quality - -## Version 1.28.0 (2021-02-15) - -**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. 
See #1788 for details.** - -- #1870 - @kvch - Update Kerberos library to latest major -- #1876 - @bai - Update docs, reference pkg.go.dev -- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close -- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages -- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies -- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy -- #1862 - @bai - Fix CI setenv permissions issues -- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev -- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica - -## Version 1.27.2 (2020-10-21) - -### Improvements - -#1750 - @krantideep95 Adds missing mock responses for mocking consumer group - -## Fixes - -#1817 - reverts #1785 - Add private method to Client interface to prevent implementation - -## Version 1.27.1 (2020-10-07) - -### Improvements - -#1775 - @d1egoaz - Adds a Producer Interceptor example -#1781 - @justin-chen - Refresh brokers given list of seed brokers -#1784 - @justin-chen - Add randomize seed broker method -#1790 - @d1egoaz - remove example binary -#1798 - @bai - Test against Go 1.15 -#1785 - @justin-chen - Add private method to Client interface to prevent implementation -#1802 - @uvw - Support Go 1.13 error unwrapping - -## Fixes - -#1791 - @stanislavkozlovski - bump default version to 1.0.0 - -## Version 1.27.0 (2020-08-11) - -### Improvements - -#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration -#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests -#1699 - @wclaeys - Consumer group support for manually comitting offsets -#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0 -#1726 - @d1egoaz - Include zstd on the functional tests -#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors -#1738 - @varun06 - fixed variable names that are named same as some std lib package names -#1741 - @varun06 - updated zstd dependency to latest v1.10.10 -#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base -#1763 - @alrs - remove deprecated tls options from test -#1769 - @bai - Add support for Kafka 2.6.0 - -## Fixes - -#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication -#1744 - @alrs - Fix isBalanced Function Signature - -## Version 1.26.4 (2020-05-19) - -## Fixes - -- #1701 - @d1egoaz - Set server name only for the current broker -- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka - -## Version 1.26.3 (2020-05-07) - -## Fixes - -- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config - -## Version 1.26.2 (2020-05-06) - -## ⚠️ Known Issues - -This release has been marked as not ready for production and may be unstable, please use v1.26.4. 
- -### Improvements - -- #1560 - @iyacontrol - add sync pool for gzip 1-9 -- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID -- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs -- #1632 - @bai - Add support for Go 1.14 -- #1640 - @random-dwi - Feature/fix list partition reassignments -- #1646 - @mimaison - Add DescribeLogDirs to admin client -- #1667 - @bai - Add support for kafka 2.5.0 - -## Fixes - -- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0 -- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine -- #1602 - @d1egoaz - adds a note about consumer groups Consume method -- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly -- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented -- #1614 - @alrs - produce_response.go: Remove Unused Functions -- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables -- #1639 - @agriffaut - Handle errors with no message but error code -- #1643 - @kzinglzy - fix `config.net.keepalive` -- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs -- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata -- #1650 - @lavoiesl - Return the response error in heartbeatLoop -- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die -- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy. - -## Version 1.26.1 (2020-02-04) - -Improvements: -- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) -- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) -- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) -- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) - -Bug Fixes: -- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) -- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) - -## Version 1.26.0 (2020-01-24) - -New Features: -- Enable zstd compression - ([1574](https://github.com/Shopify/sarama/pull/1574), - [1582](https://github.com/Shopify/sarama/pull/1582)) -- Support headers in tools kafka-console-producer - ([1549](https://github.com/Shopify/sarama/pull/1549)) - -Improvements: -- Add SASL AuthIdentity to SASL frames (authzid) - ([1585](https://github.com/Shopify/sarama/pull/1585)). - -Bug Fixes: -- Sending messages with ZStd compression enabled fails in multiple ways - ([1252](https://github.com/Shopify/sarama/issues/1252)). -- Use the broker for any admin on BrokerConfig - ([1571](https://github.com/Shopify/sarama/pull/1571)). -- Set DescribeConfigRequest Version field - ([1576](https://github.com/Shopify/sarama/pull/1576)). -- ConsumerGroup flooding logs with client/metadata update req - ([1578](https://github.com/Shopify/sarama/pull/1578)). -- MetadataRequest version in DescribeCluster - ([1580](https://github.com/Shopify/sarama/pull/1580)). -- Fix deadlock in consumer group handleError - ([1581](https://github.com/Shopify/sarama/pull/1581)) -- Fill in the Fetch{Request,Response} protocol - ([1582](https://github.com/Shopify/sarama/pull/1582)). 
-- Retry topic request on ControllerNotAvailable - ([1586](https://github.com/Shopify/sarama/pull/1586)). - -## Version 1.25.0 (2020-01-13) - -New Features: -- Support TLS protocol in kafka-producer-performance - ([1538](https://github.com/Shopify/sarama/pull/1538)). -- Add support for kafka 2.4.0 - ([1552](https://github.com/Shopify/sarama/pull/1552)). - -Improvements: -- Allow the Consumer to disable auto-commit offsets - ([1164](https://github.com/Shopify/sarama/pull/1164)). -- Produce records with consistent timestamps - ([1455](https://github.com/Shopify/sarama/pull/1455)). - -Bug Fixes: -- Fix incorrect SetTopicMetadata name mentions - ([1534](https://github.com/Shopify/sarama/pull/1534)). -- Fix client.tryRefreshMetadata Println - ([1535](https://github.com/Shopify/sarama/pull/1535)). -- Fix panic on calling updateMetadata on closed client - ([1531](https://github.com/Shopify/sarama/pull/1531)). -- Fix possible faulty metrics in TestFuncProducing - ([1545](https://github.com/Shopify/sarama/pull/1545)). - -## Version 1.24.1 (2019-10-31) - -New Features: -- Add DescribeLogDirs Request/Response pair - ([1520](https://github.com/Shopify/sarama/pull/1520)). - -Bug Fixes: -- Fix ClusterAdmin returning invalid controller ID on DescribeCluster - ([1518](https://github.com/Shopify/sarama/pull/1518)). -- Fix issue with consumergroup not rebalancing when new partition is added - ([1525](https://github.com/Shopify/sarama/pull/1525)). -- Ensure consistent use of read/write deadlines - ([1529](https://github.com/Shopify/sarama/pull/1529)). - -## Version 1.24.0 (2019-10-09) - -New Features: -- Add sticky partition assignor - ([1416](https://github.com/Shopify/sarama/pull/1416)). -- Switch from cgo zstd package to pure Go implementation - ([1477](https://github.com/Shopify/sarama/pull/1477)). - -Improvements: -- Allow creating ClusterAdmin from client - ([1415](https://github.com/Shopify/sarama/pull/1415)). -- Set KafkaVersion in ListAcls method - ([1452](https://github.com/Shopify/sarama/pull/1452)). -- Set request version in CreateACL ClusterAdmin method - ([1458](https://github.com/Shopify/sarama/pull/1458)). -- Set request version in DeleteACL ClusterAdmin method - ([1461](https://github.com/Shopify/sarama/pull/1461)). -- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest - ([1464](https://github.com/Shopify/sarama/pull/1464)). -- Remove direct usage of gofork - ([1465](https://github.com/Shopify/sarama/pull/1465)). -- Add support for Go 1.13 - ([1478](https://github.com/Shopify/sarama/pull/1478)). -- Improve behavior of NewMockListAclsResponse - ([1481](https://github.com/Shopify/sarama/pull/1481)). - -Bug Fixes: -- Fix race condition in consumergroup example - ([1434](https://github.com/Shopify/sarama/pull/1434)). -- Fix brokerProducer goroutine leak - ([1442](https://github.com/Shopify/sarama/pull/1442)). -- Use released version of lz4 library - ([1469](https://github.com/Shopify/sarama/pull/1469)). -- Set correct version in MockDeleteTopicsResponse - ([1484](https://github.com/Shopify/sarama/pull/1484)). -- Fix CLI help message typo - ([1494](https://github.com/Shopify/sarama/pull/1494)). - -Known Issues: -- Please **don't** use Zstd, as it doesn't work right now. - See https://github.com/Shopify/sarama/issues/1252 - -## Version 1.23.1 (2019-07-22) - -Bug Fixes: -- Fix fetch delete bug record - ([1425](https://github.com/Shopify/sarama/pull/1425)). -- Handle SASL/OAUTHBEARER token rejection - ([1428](https://github.com/Shopify/sarama/pull/1428)). 
- -## Version 1.23.0 (2019-07-02) - -New Features: -- Add support for Kafka 2.3.0 - ([1418](https://github.com/Shopify/sarama/pull/1418)). -- Add support for ListConsumerGroupOffsets v2 - ([1374](https://github.com/Shopify/sarama/pull/1374)). -- Add support for DeleteConsumerGroup - ([1417](https://github.com/Shopify/sarama/pull/1417)). -- Add support for SASLVersion configuration - ([1410](https://github.com/Shopify/sarama/pull/1410)). -- Add kerberos support - ([1366](https://github.com/Shopify/sarama/pull/1366)). - -Improvements: -- Improve sasl_scram_client example - ([1406](https://github.com/Shopify/sarama/pull/1406)). -- Fix shutdown and race-condition in consumer-group example - ([1404](https://github.com/Shopify/sarama/pull/1404)). -- Add support for error codes 77—81 - ([1397](https://github.com/Shopify/sarama/pull/1397)). -- Pool internal objects allocated per message - ([1385](https://github.com/Shopify/sarama/pull/1385)). -- Reduce packet decoder allocations - ([1373](https://github.com/Shopify/sarama/pull/1373)). -- Support timeout when fetching metadata - ([1359](https://github.com/Shopify/sarama/pull/1359)). - -Bug Fixes: -- Fix fetch size integer overflow - ([1376](https://github.com/Shopify/sarama/pull/1376)). -- Handle and log throttled FetchResponses - ([1383](https://github.com/Shopify/sarama/pull/1383)). -- Refactor misspelled word Resouce to Resource - ([1368](https://github.com/Shopify/sarama/pull/1368)). - -## Version 1.22.1 (2019-04-29) - -Improvements: -- Use zstd 1.3.8 - ([1350](https://github.com/Shopify/sarama/pull/1350)). -- Add support for SaslHandshakeRequest v1 - ([1354](https://github.com/Shopify/sarama/pull/1354)). - -Bug Fixes: -- Fix V5 MetadataRequest nullable topics array - ([1353](https://github.com/Shopify/sarama/pull/1353)). -- Use a different SCRAM client for each broker connection - ([1349](https://github.com/Shopify/sarama/pull/1349)). -- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 - ([1344](https://github.com/Shopify/sarama/pull/1344)). - -## Version 1.22.0 (2019-04-09) - -New Features: -- Add Offline Replicas Operation to Client - ([1318](https://github.com/Shopify/sarama/pull/1318)). -- Allow using proxy when connecting to broker - ([1326](https://github.com/Shopify/sarama/pull/1326)). -- Implement ReadCommitted - ([1307](https://github.com/Shopify/sarama/pull/1307)). -- Add support for Kafka 2.2.0 - ([1331](https://github.com/Shopify/sarama/pull/1331)). -- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes - ([1331](https://github.com/Shopify/sarama/pull/1295)). - -Improvements: -- Unregister all broker metrics on broker stop - ([1232](https://github.com/Shopify/sarama/pull/1232)). -- Add SCRAM authentication example - ([1303](https://github.com/Shopify/sarama/pull/1303)). -- Add consumergroup examples - ([1304](https://github.com/Shopify/sarama/pull/1304)). -- Expose consumer batch size metric - ([1296](https://github.com/Shopify/sarama/pull/1296)). -- Add TLS options to console producer and consumer - ([1300](https://github.com/Shopify/sarama/pull/1300)). -- Reduce client close bookkeeping - ([1297](https://github.com/Shopify/sarama/pull/1297)). -- Satisfy error interface in create responses - ([1154](https://github.com/Shopify/sarama/pull/1154)). -- Please lint gods - ([1346](https://github.com/Shopify/sarama/pull/1346)). - -Bug Fixes: -- Fix multi consumer group instance crash - ([1338](https://github.com/Shopify/sarama/pull/1338)). 
-- Update lz4 to latest version - ([1347](https://github.com/Shopify/sarama/pull/1347)). -- Retry ErrNotCoordinatorForConsumer in new consumergroup session - ([1231](https://github.com/Shopify/sarama/pull/1231)). -- Fix cleanup error handler - ([1332](https://github.com/Shopify/sarama/pull/1332)). -- Fix rate condition in PartitionConsumer - ([1156](https://github.com/Shopify/sarama/pull/1156)). - -## Version 1.21.0 (2019-02-24) - -New Features: -- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest - ([1236](https://github.com/Shopify/sarama/pull/1236)). -- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests - ([1178](https://github.com/Shopify/sarama/pull/1178)). -- Implement SASL/OAUTHBEARER - ([1240](https://github.com/Shopify/sarama/pull/1240)). - -Improvements: -- Add Go mod support - ([1282](https://github.com/Shopify/sarama/pull/1282)). -- Add error codes 73—76 - ([1239](https://github.com/Shopify/sarama/pull/1239)). -- Add retry backoff function - ([1160](https://github.com/Shopify/sarama/pull/1160)). -- Maintain metadata in the producer even when retries are disabled - ([1189](https://github.com/Shopify/sarama/pull/1189)). -- Include ReplicaAssignment in ListTopics - ([1274](https://github.com/Shopify/sarama/pull/1274)). -- Add producer performance tool - ([1222](https://github.com/Shopify/sarama/pull/1222)). -- Add support LogAppend timestamps - ([1258](https://github.com/Shopify/sarama/pull/1258)). - -Bug Fixes: -- Fix potential deadlock when a heartbeat request fails - ([1286](https://github.com/Shopify/sarama/pull/1286)). -- Fix consuming compacted topic - ([1227](https://github.com/Shopify/sarama/pull/1227)). -- Set correct Kafka version for DescribeConfigsRequest v1 - ([1277](https://github.com/Shopify/sarama/pull/1277)). -- Update kafka test version - ([1273](https://github.com/Shopify/sarama/pull/1273)). - -## Version 1.20.1 (2019-01-10) - -New Features: -- Add optional replica id in offset request - ([1100](https://github.com/Shopify/sarama/pull/1100)). - -Improvements: -- Implement DescribeConfigs Request + Response v1 & v2 - ([1230](https://github.com/Shopify/sarama/pull/1230)). -- Reuse compression objects - ([1185](https://github.com/Shopify/sarama/pull/1185)). -- Switch from png to svg for GoDoc link in README - ([1243](https://github.com/Shopify/sarama/pull/1243)). -- Fix typo in deprecation notice for FetchResponseBlock.Records - ([1242](https://github.com/Shopify/sarama/pull/1242)). -- Fix typos in consumer metadata response file - ([1244](https://github.com/Shopify/sarama/pull/1244)). - -Bug Fixes: -- Revert to individual msg retries for non-idempotent - ([1203](https://github.com/Shopify/sarama/pull/1203)). -- Respect MaxMessageBytes limit for uncompressed messages - ([1141](https://github.com/Shopify/sarama/pull/1141)). - -## Version 1.20.0 (2018-12-10) - -New Features: - - Add support for zstd compression - ([#1170](https://github.com/Shopify/sarama/pull/1170)). - - Add support for Idempotent Producer - ([#1152](https://github.com/Shopify/sarama/pull/1152)). - - Add support support for Kafka 2.1.0 - ([#1229](https://github.com/Shopify/sarama/pull/1229)). - - Add support support for OffsetCommit request/response pairs versions v1 to v5 - ([#1201](https://github.com/Shopify/sarama/pull/1201)). - - Add support support for OffsetFetch request/response pair up to version v5 - ([#1198](https://github.com/Shopify/sarama/pull/1198)). 
- -Improvements: - - Export broker's Rack setting - ([#1173](https://github.com/Shopify/sarama/pull/1173)). - - Always use latest patch version of Go on CI - ([#1202](https://github.com/Shopify/sarama/pull/1202)). - - Add error codes 61 to 72 - ([#1195](https://github.com/Shopify/sarama/pull/1195)). - -Bug Fixes: - - Fix build without cgo - ([#1182](https://github.com/Shopify/sarama/pull/1182)). - - Fix go vet suggestion in consumer group file - ([#1209](https://github.com/Shopify/sarama/pull/1209)). - - Fix typos in code and comments - ([#1228](https://github.com/Shopify/sarama/pull/1228)). - -## Version 1.19.0 (2018-09-27) - -New Features: - - Implement a higher-level consumer group - ([#1099](https://github.com/Shopify/sarama/pull/1099)). - -Improvements: - - Add support for Go 1.11 - ([#1176](https://github.com/Shopify/sarama/pull/1176)). - -Bug Fixes: - - Fix encoding of `MetadataResponse` with version 2 and higher - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - - Fix race condition in mock async producer - ([#1174](https://github.com/Shopify/sarama/pull/1174)). - -## Version 1.18.0 (2018-09-07) - -New Features: - - Make `Partitioner.RequiresConsistency` vary per-message - ([#1112](https://github.com/Shopify/sarama/pull/1112)). - - Add customizable partitioner - ([#1118](https://github.com/Shopify/sarama/pull/1118)). - - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, - `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` - ([#1055](https://github.com/Shopify/sarama/pull/1055)). - -Improvements: - - Add support for Kafka 2.0.0 - ([#1149](https://github.com/Shopify/sarama/pull/1149)). - - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts - ([#1123](https://github.com/Shopify/sarama/pull/1123)). - - Simpler offset management - ([#1127](https://github.com/Shopify/sarama/pull/1127)). - -Bug Fixes: - - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka - ([#1110](https://github.com/Shopify/sarama/pull/1110)). - - Fix consumer block when response did not contain all the - expected topic/partition blocks - ([#1086](https://github.com/Shopify/sarama/pull/1086)). - - Fix consumer block when response contains only constrol messages - ([#1115](https://github.com/Shopify/sarama/pull/1115)). - - Add timeout config for ClusterAdmin requests - ([#1142](https://github.com/Shopify/sarama/pull/1142)). - - Add version check when producing message with headers - ([#1117](https://github.com/Shopify/sarama/pull/1117)). - - Fix `MetadataRequest` for empty list of topics - ([#1132](https://github.com/Shopify/sarama/pull/1132)). - - Fix producer topic metadata on-demand fetch when topic error happens in metadata response - ([#1125](https://github.com/Shopify/sarama/pull/1125)). - -## Version 1.17.0 (2018-05-30) - -New Features: - - Add support for gzip compression levels - ([#1044](https://github.com/Shopify/sarama/pull/1044)). - - Add support for Metadata request/response pairs versions v1 to v5 - ([#1047](https://github.com/Shopify/sarama/pull/1047), - [#1069](https://github.com/Shopify/sarama/pull/1069)). - - Add versioning to JoinGroup request/response pairs - ([#1098](https://github.com/Shopify/sarama/pull/1098)) - - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs - ([#1065](https://github.com/Shopify/sarama/pull/1065), - [#1096](https://github.com/Shopify/sarama/pull/1096), - [#1027](https://github.com/Shopify/sarama/pull/1027)). 
- - Add `Controller()` method to Client interface - ([#1063](https://github.com/Shopify/sarama/pull/1063)). - -Improvements: - - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp - ([#1010](https://github.com/Shopify/sarama/pull/1010)). - - Expose missing protocol parts: `msgSet` and `recordBatch` - ([#1049](https://github.com/Shopify/sarama/pull/1049)). - - Add support for v1 DeleteTopics Request - ([#1052](https://github.com/Shopify/sarama/pull/1052)). - - Add support for Go 1.10 - ([#1064](https://github.com/Shopify/sarama/pull/1064)). - - Claim support for Kafka 1.1.0 - ([#1073](https://github.com/Shopify/sarama/pull/1073)). - -Bug Fixes: - - Fix FindCoordinatorResponse.encode to allow nil Coordinator - ([#1050](https://github.com/Shopify/sarama/pull/1050), - [#1051](https://github.com/Shopify/sarama/pull/1051)). - - Clear all metadata when we have the latest topic info - ([#1033](https://github.com/Shopify/sarama/pull/1033)). - - Make `PartitionConsumer.Close` idempotent - ([#1092](https://github.com/Shopify/sarama/pull/1092)). - -## Version 1.16.0 (2018-02-12) - -New Features: - - Add support for the Create/Delete Topics request/response pairs - ([#1007](https://github.com/Shopify/sarama/pull/1007), - [#1008](https://github.com/Shopify/sarama/pull/1008)). - - Add support for the Describe/Create/Delete ACL request/response pairs - ([#1009](https://github.com/Shopify/sarama/pull/1009)). - - Add support for the five transaction-related request/response pairs - ([#1016](https://github.com/Shopify/sarama/pull/1016)). - -Improvements: - - Permit setting version on mock producer responses - ([#999](https://github.com/Shopify/sarama/pull/999)). - - Add `NewMockBrokerListener` helper for testing TLS connections - ([#1019](https://github.com/Shopify/sarama/pull/1019)). - - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB - which results in much higher throughput in most cases - ([#1024](https://github.com/Shopify/sarama/pull/1024)). - - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to - reduce CPU and memory usage when processing many partitions - ([#1028](https://github.com/Shopify/sarama/pull/1028)). - - Assign relative offsets to messages in the producer to save the brokers a - recompression pass - ([#1002](https://github.com/Shopify/sarama/pull/1002), - [#1015](https://github.com/Shopify/sarama/pull/1015)). - -Bug Fixes: - - Fix producing uncompressed batches with the new protocol format - ([#1032](https://github.com/Shopify/sarama/issues/1032)). - - Fix consuming compacted topics with the new protocol format - ([#1005](https://github.com/Shopify/sarama/issues/1005)). - - Fix consuming topics with a mix of protocol formats - ([#1021](https://github.com/Shopify/sarama/issues/1021)). - - Fix consuming when the broker includes multiple batches in a single response - ([#1022](https://github.com/Shopify/sarama/issues/1022)). - - Fix detection of `PartialTrailingMessage` when the partial message was - truncated before the magic value indicating its version - ([#1030](https://github.com/Shopify/sarama/pull/1030)). - - Fix expectation-checking in the mock of `SyncProducer.SendMessages` - ([#1035](https://github.com/Shopify/sarama/pull/1035)). - -## Version 1.15.0 (2017-12-08) - -New Features: - - Claim official support for Kafka 1.0, though it did already work - ([#984](https://github.com/Shopify/sarama/pull/984)). 
- - Helper methods for Kafka version numbers to/from strings - ([#989](https://github.com/Shopify/sarama/pull/989)). - - Implement CreatePartitions request/response - ([#985](https://github.com/Shopify/sarama/pull/985)). - -Improvements: - - Add error codes 45-60 - ([#986](https://github.com/Shopify/sarama/issues/986)). - -Bug Fixes: - - Fix slow consuming for certain Kafka 0.11/1.0 configurations - ([#982](https://github.com/Shopify/sarama/pull/982)). - - Correctly determine when a FetchResponse contains the new message format - ([#990](https://github.com/Shopify/sarama/pull/990)). - - Fix producing with multiple headers - ([#996](https://github.com/Shopify/sarama/pull/996)). - - Fix handling of truncated record batches - ([#998](https://github.com/Shopify/sarama/pull/998)). - - Fix leaking metrics when closing brokers - ([#991](https://github.com/Shopify/sarama/pull/991)). - -## Version 1.14.0 (2017-11-13) - -New Features: - - Add support for the new Kafka 0.11 record-batch format, including the wire - protocol and the necessary behavioural changes in the producer and consumer. - Transactions and idempotency are not yet supported, but producing and - consuming should work with all the existing bells and whistles (batching, - compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta - of Arista Networks for this work. Part of - ([#901](https://github.com/Shopify/sarama/issues/901)). - -Bug Fixes: - - Fix encoding of ProduceResponse versions in test - ([#970](https://github.com/Shopify/sarama/pull/970)). - - Return partial replicas list when we have it - ([#975](https://github.com/Shopify/sarama/pull/975)). - -## Version 1.13.0 (2017-10-04) - -New Features: - - Support for FetchRequest version 3 - ([#905](https://github.com/Shopify/sarama/pull/905)). - - Permit setting version on mock FetchResponses - ([#939](https://github.com/Shopify/sarama/pull/939)). - - Add a configuration option to support storing only minimal metadata for - extremely large clusters - ([#937](https://github.com/Shopify/sarama/pull/937)). - - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets - ([#932](https://github.com/Shopify/sarama/pull/932)). - -Improvements: - - Provide the block-level timestamp when consuming compressed messages - ([#885](https://github.com/Shopify/sarama/issues/885)). - - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned - by the broker, which can be meaningful - ([#930](https://github.com/Shopify/sarama/pull/930)). - - Use a `Ticker` to reduce consumer timer overhead at the cost of higher - variance in the actual timeout - ([#933](https://github.com/Shopify/sarama/pull/933)). - -Bug Fixes: - - Gracefully handle messages with negative timestamps - ([#907](https://github.com/Shopify/sarama/pull/907)). - - Raise a proper error when encountering an unknown message version - ([#940](https://github.com/Shopify/sarama/pull/940)). - -## Version 1.12.0 (2017-05-08) - -New Features: - - Added support for the `ApiVersions` request and response pair, and Kafka - version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note - that you still need to specify the Kafka version in the Sarama configuration - for the time being. - - Added a `Brokers` method to the Client which returns the complete set of - active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
- - Added an `InSyncReplicas` method to the Client which returns the set of all - in-sync broker IDs for the given partition, now that the Kafka versions for - which this was misleading are no longer in our supported set - ([#872](https://github.com/Shopify/sarama/pull/872)). - - Added a `NewCustomHashPartitioner` method which allows constructing a hash - partitioner with a custom hash method in case the default (FNV-1a) is not - suitable - ([#837](https://github.com/Shopify/sarama/pull/837), - [#841](https://github.com/Shopify/sarama/pull/841)). - -Improvements: - - Recognize more Kafka error codes - ([#859](https://github.com/Shopify/sarama/pull/859)). - -Bug Fixes: - - Fix an issue where decoding a malformed FetchRequest would not return the - correct error ([#818](https://github.com/Shopify/sarama/pull/818)). - - Respect ordering of group protocols in JoinGroupRequests. This fix is - transparent if you're using the `AddGroupProtocol` or - `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from - the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` - ([#812](https://github.com/Shopify/sarama/issues/812)). - - Fix an alignment-related issue with atomics on 32-bit architectures - ([#859](https://github.com/Shopify/sarama/pull/859)). - -## Version 1.11.0 (2016-12-20) - -_Important:_ As of Sarama 1.11 it is necessary to set the config value of -`Producer.Return.Successes` to true in order to use the SyncProducer. Previous -versions would silently override this value when instantiating a SyncProducer -which led to unexpected values and data races. - -New Features: - - Metrics! Thanks to Sébastien Launay for all his work on this feature - ([#701](https://github.com/Shopify/sarama/pull/701), - [#746](https://github.com/Shopify/sarama/pull/746), - [#766](https://github.com/Shopify/sarama/pull/766)). - - Add support for LZ4 compression - ([#786](https://github.com/Shopify/sarama/pull/786)). - - Add support for ListOffsetRequest v1 and Kafka 0.10.1 - ([#775](https://github.com/Shopify/sarama/pull/775)). - - Added a `HighWaterMarks` method to the Consumer which aggregates the - `HighWaterMarkOffset` values of its child topic/partitions - ([#769](https://github.com/Shopify/sarama/pull/769)). - -Bug Fixes: - - Fixed producing when using timestamps, compression and Kafka 0.10 - ([#759](https://github.com/Shopify/sarama/pull/759)). - - Added missing decoder methods to DescribeGroups response - ([#756](https://github.com/Shopify/sarama/pull/756)). - - Fix producer shutdown when `Return.Errors` is disabled - ([#787](https://github.com/Shopify/sarama/pull/787)). - - Don't mutate configuration in SyncProducer - ([#790](https://github.com/Shopify/sarama/pull/790)). - - Fix crash on SASL initialization failure - ([#795](https://github.com/Shopify/sarama/pull/795)). - -## Version 1.10.1 (2016-08-30) - -Bug Fixes: - - Fix the documentation for `HashPartitioner` which was incorrect - ([#717](https://github.com/Shopify/sarama/pull/717)). - - Permit client creation even when it is limited by ACLs - ([#722](https://github.com/Shopify/sarama/pull/722)). - - Several fixes to the consumer timer optimization code, regressions introduced - in v1.10.0. Go's timers are finicky - ([#730](https://github.com/Shopify/sarama/pull/730), - [#733](https://github.com/Shopify/sarama/pull/733), - [#734](https://github.com/Shopify/sarama/pull/734)). - - Handle consuming compressed relative offsets with Kafka 0.10 - ([#735](https://github.com/Shopify/sarama/pull/735)). 
- -## Version 1.10.0 (2016-08-02) - -_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of -Kafka you are running against (via the `config.Version` value) in order to use -features that may not be compatible with old Kafka versions. If you don't -specify this value it will default to 0.8.2 (the minimum supported), and trying -to use more recent features (like the offset manager) will fail with an error. - -_Also:_ The offset-manager's behaviour has been changed to match the upstream -java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and -[#713](https://github.com/Shopify/sarama/pull/713)). If you use the -offset-manager, please ensure that you are committing one *greater* than the -last consumed message offset or else you may end up consuming duplicate -messages. - -New Features: - - Support for Kafka 0.10 - ([#672](https://github.com/Shopify/sarama/pull/672), - [#678](https://github.com/Shopify/sarama/pull/678), - [#681](https://github.com/Shopify/sarama/pull/681), and others). - - Support for configuring the target Kafka version - ([#676](https://github.com/Shopify/sarama/pull/676)). - - Batch producing support in the SyncProducer - ([#677](https://github.com/Shopify/sarama/pull/677)). - - Extend producer mock to allow setting expectations on message contents - ([#667](https://github.com/Shopify/sarama/pull/667)). - -Improvements: - - Support `nil` compressed messages for deleting in compacted topics - ([#634](https://github.com/Shopify/sarama/pull/634)). - - Pre-allocate decoding errors, greatly reducing heap usage and GC time against - misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)). - - Re-use consumer expiry timers, removing one allocation per consumed message - ([#707](https://github.com/Shopify/sarama/pull/707)). - -Bug Fixes: - - Actually default the client ID to "sarama" like we say we do - ([#664](https://github.com/Shopify/sarama/pull/664)). - - Fix a rare issue where `Client.Leader` could return the wrong error - ([#685](https://github.com/Shopify/sarama/pull/685)). - - Fix a possible tight loop in the consumer - ([#693](https://github.com/Shopify/sarama/pull/693)). - - Match upstream's offset-tracking behaviour - ([#705](https://github.com/Shopify/sarama/pull/705)). - - Report UnknownTopicOrPartition errors from the offset manager - ([#706](https://github.com/Shopify/sarama/pull/706)). - - Fix possible negative partition value from the HashPartitioner - ([#709](https://github.com/Shopify/sarama/pull/709)). - -## Version 1.9.0 (2016-05-16) - -New Features: - - Add support for custom offset manager retention durations - ([#602](https://github.com/Shopify/sarama/pull/602)). - - Publish low-level mocks to enable testing of third-party producer/consumer - implementations ([#570](https://github.com/Shopify/sarama/pull/570)). - - Declare support for Golang 1.6 - ([#611](https://github.com/Shopify/sarama/pull/611)). - - Support for SASL plain-text auth - ([#648](https://github.com/Shopify/sarama/pull/648)). - -Improvements: - - Simplified broker locking scheme slightly - ([#604](https://github.com/Shopify/sarama/pull/604)). - - Documentation cleanup - ([#605](https://github.com/Shopify/sarama/pull/605), - [#621](https://github.com/Shopify/sarama/pull/621), - [#654](https://github.com/Shopify/sarama/pull/654)). - -Bug Fixes: - - Fix race condition shutting down the OffsetManager - ([#658](https://github.com/Shopify/sarama/pull/658)). 
- -## Version 1.8.0 (2016-02-01) - -New Features: - - Full support for Kafka 0.9: - - All protocol messages and fields - ([#586](https://github.com/Shopify/sarama/pull/586), - [#588](https://github.com/Shopify/sarama/pull/588), - [#590](https://github.com/Shopify/sarama/pull/590)). - - Verified that TLS support works - ([#581](https://github.com/Shopify/sarama/pull/581)). - - Fixed the OffsetManager compatibility - ([#585](https://github.com/Shopify/sarama/pull/585)). - -Improvements: - - Optimize for fewer system calls when reading from the network - ([#584](https://github.com/Shopify/sarama/pull/584)). - - Automatically retry `InvalidMessage` errors to match upstream behaviour - ([#589](https://github.com/Shopify/sarama/pull/589)). - -## Version 1.7.0 (2015-12-11) - -New Features: - - Preliminary support for Kafka 0.9 - ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several - caveats: - - Protocol-layer support is mostly in place - ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9 - renamed some messages and fields, which we did not in order to preserve API - compatibility. - - The producer and consumer work against 0.9, but the offset manager does - not ([#573](https://github.com/Shopify/sarama/pull/573)). - - TLS support may or may not work - ([#581](https://github.com/Shopify/sarama/pull/581)). - -Improvements: - - Don't wait for request timeouts on dead brokers, greatly speeding recovery - when the TCP connection is left hanging - ([#548](https://github.com/Shopify/sarama/pull/548)). - - Refactored part of the producer. The new version provides a much more elegant - solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also - slightly more efficient, and much more precise in calculating batch sizes - when compression is used - ([#549](https://github.com/Shopify/sarama/pull/549), - [#550](https://github.com/Shopify/sarama/pull/550), - [#551](https://github.com/Shopify/sarama/pull/551)). - -Bug Fixes: - - Fix race condition in consumer test mock - ([#553](https://github.com/Shopify/sarama/pull/553)). - -## Version 1.6.1 (2015-09-25) - -Bug Fixes: - - Fix panic that could occur if a user-supplied message value failed to encode - ([#449](https://github.com/Shopify/sarama/pull/449)). - -## Version 1.6.0 (2015-09-04) - -New Features: - - Implementation of a consumer offset manager using the APIs introduced in - Kafka 0.8.2. The API is designed mainly for integration into a future - high-level consumer, not for direct use, although it is *possible* to use it - directly. - ([#461](https://github.com/Shopify/sarama/pull/461)). - -Improvements: - - CRC32 calculation is much faster on machines with SSE4.2 instructions, - removing a major hotspot from most profiles - ([#255](https://github.com/Shopify/sarama/pull/255)). - -Bug Fixes: - - Make protocol decoding more robust against some malformed packets generated - by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523), - [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways - ([#528](https://github.com/Shopify/sarama/pull/528)). - - Fix a potential race condition panic in the consumer on shutdown - ([#529](https://github.com/Shopify/sarama/pull/529)). - -## Version 1.5.0 (2015-08-17) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). 
- -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). - - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -## Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -## Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -## Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -## Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -## Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
- This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -## Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -## Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -## Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. -- The consumer has been rewritten to only open one connection per broker instead of one connection per partition. 
-- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package. -- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you. -- All the configuration values have been unified in the `Config` struct. -- Much improved test suite. diff --git a/vendor/github.com/Shopify/sarama/Dockerfile.kafka b/vendor/github.com/Shopify/sarama/Dockerfile.kafka deleted file mode 100644 index c361f6d122..0000000000 --- a/vendor/github.com/Shopify/sarama/Dockerfile.kafka +++ /dev/null @@ -1,27 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:latest - -USER root - -RUN microdnf update \ - && microdnf install curl gzip java-11-openjdk-headless tar \ - && microdnf clean all - -ENV JAVA_HOME=/usr/lib/jvm/jre-11 - -# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html -# Ensure Java doesn't cache any dns results -RUN cd /etc/java/java-11-openjdk/*/conf/security \ - && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ - && echo 'networkaddress.cache.ttl=0' >> java.security \ - && echo 'networkaddress.cache.negative.ttl=0' >> java.security - -# https://github.com/apache/kafka/blob/0d518aaed158896ee9ee6949b8f38128d1d73634/tests/docker/Dockerfile#L65-L67 -ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" -RUN mkdir -p "/opt/kafka-2.8.2" && chmod a+rw /opt/kafka-2.8.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-2.8.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-2.8.2" -RUN mkdir -p "/opt/kafka-3.0.2" && chmod a+rw /opt/kafka-3.0.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.0.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.0.2" -RUN mkdir -p "/opt/kafka-3.1.2" && chmod a+rw /opt/kafka-3.1.2 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.1.2.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.1.2" -RUN mkdir -p "/opt/kafka-3.2.3" && chmod a+rw /opt/kafka-3.2.3 && curl -s "$KAFKA_MIRROR/kafka_2.12-3.2.3.tgz" | tar xz --strip-components=1 -C "/opt/kafka-3.2.3" - -COPY entrypoint.sh / - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go deleted file mode 100644 index aa7fb74986..0000000000 --- a/vendor/github.com/Shopify/sarama/decompress.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "sync" - - snappy "github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4/v4" -) - -var ( - lz4ReaderPool = sync.Pool{ - New: func() interface{} { - return lz4.NewReader(nil) - }, - } - - gzipReaderPool sync.Pool -) - -func decompress(cc CompressionCodec, data []byte) ([]byte, error) { - switch cc { - case CompressionNone: - return data, nil - case CompressionGZIP: - var err error - reader, ok := gzipReaderPool.Get().(*gzip.Reader) - if !ok { - reader, err = gzip.NewReader(bytes.NewReader(data)) - } else { - err = reader.Reset(bytes.NewReader(data)) - } - - if err != nil { - return nil, err - } - - defer gzipReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionSnappy: - return snappy.Decode(data) - case CompressionLZ4: - reader, ok := lz4ReaderPool.Get().(*lz4.Reader) - if !ok { - reader = lz4.NewReader(bytes.NewReader(data)) - } else { - reader.Reset(bytes.NewReader(data)) - } - defer lz4ReaderPool.Put(reader) - - return io.ReadAll(reader) - case CompressionZSTD: - return 
zstdDecompress(ZstdDecoderParams{}, nil, data) - default: - return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} - } -} diff --git a/vendor/github.com/Shopify/sarama/entrypoint.sh b/vendor/github.com/Shopify/sarama/entrypoint.sh deleted file mode 100755 index cbcbcfc588..0000000000 --- a/vendor/github.com/Shopify/sarama/entrypoint.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -KAFKA_VERSION="${KAFKA_VERSION:-3.1.2}" -KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" - -if [ ! -d "${KAFKA_HOME}" ]; then - echo 'Error: KAFKA_VERSION '$KAFKA_VERSION' not available in this image at '$KAFKA_HOME - exit 1 -fi - -cd "${KAFKA_HOME}" || exit 1 - -# discard all empty/commented lines -sed -e '/^#/d' -e '/^$/d' -i"" config/server.properties - -# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka -for var in "${!KAFKA_CFG_@}"; do - key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')" - sed -e '/^'$key'/d' -i"" config/server.properties - value="${!var}" - echo "$key=$value" >>config/server.properties -done - -sort config/server.properties - -exec bin/kafka-server-start.sh config/server.properties diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go deleted file mode 100644 index 4553b2d2ea..0000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ /dev/null @@ -1,27 +0,0 @@ -package sarama - -type ListGroupsRequest struct{} - -func (r *ListGroupsRequest) encode(pe packetEncoder) error { - return nil -} - -func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { - return nil -} - -func (r *ListGroupsRequest) key() int16 { - return 16 -} - -func (r *ListGroupsRequest) version() int16 { - return 0 -} - -func (r *ListGroupsRequest) headerVersion() int16 { - return 1 -} - -func (r *ListGroupsRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go deleted file mode 100644 index 777bae7e63..0000000000 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -type ListGroupsResponse struct { - Err KError - Groups map[string]string -} - -func (r *ListGroupsResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - - if err := pe.putArrayLength(len(r.Groups)); err != nil { - return err - } - for groupId, protocolType := range r.Groups { - if err := pe.putString(groupId); err != nil { - return err - } - if err := pe.putString(protocolType); err != nil { - return err - } - } - - return nil -} - -func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { - kerr, err := pd.getInt16() - if err != nil { - return err - } - - r.Err = KError(kerr) - - n, err := pd.getArrayLength() - if err != nil { - return err - } - if n == 0 { - return nil - } - - r.Groups = make(map[string]string) - for i := 0; i < n; i++ { - groupId, err := pd.getString() - if err != nil { - return err - } - protocolType, err := pd.getString() - if err != nil { - return err - } - - r.Groups[groupId] = protocolType - } - - return nil -} - -func (r *ListGroupsResponse) key() int16 { - return 16 -} - -func (r *ListGroupsResponse) version() int16 { - return 0 -} - -func (r *ListGroupsResponse) headerVersion() int16 { - return 0 -} - -func (r *ListGroupsResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 -} diff --git 
a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go deleted file mode 100644 index e835f5a9c8..0000000000 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ /dev/null @@ -1,85 +0,0 @@ -package sarama - -type MetadataRequest struct { - Version int16 - Topics []string - AllowAutoTopicCreation bool -} - -func (r *MetadataRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 5 { - return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} - } - if r.Version == 0 || len(r.Topics) > 0 { - err := pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - - for i := range r.Topics { - err = pe.putString(r.Topics[i]) - if err != nil { - return err - } - } - } else { - pe.putInt32(-1) - } - if r.Version > 3 { - pe.putBool(r.AllowAutoTopicCreation) - } - return nil -} - -func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { - r.Version = version - size, err := pd.getInt32() - if err != nil { - return err - } - if size > 0 { - r.Topics = make([]string, size) - for i := range r.Topics { - topic, err := pd.getString() - if err != nil { - return err - } - r.Topics[i] = topic - } - } - if r.Version > 3 { - autoCreation, err := pd.getBool() - if err != nil { - return err - } - r.AllowAutoTopicCreation = autoCreation - } - return nil -} - -func (r *MetadataRequest) key() int16 { - return 3 -} - -func (r *MetadataRequest) version() int16 { - return r.Version -} - -func (r *MetadataRequest) headerVersion() int16 { - return 1 -} - -func (r *MetadataRequest) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_10_0_0 - case 2: - return V0_10_1_0 - case 3, 4: - return V0_11_0_0 - case 5: - return V1_0_0_0 - default: - return MinVersion - } -} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go deleted file mode 100644 index 0bb8702cc3..0000000000 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ /dev/null @@ -1,325 +0,0 @@ -package sarama - -type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 - OfflineReplicas []int32 -} - -func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pm.Err = KError(tmp) - - pm.ID, err = pd.getInt32() - if err != nil { - return err - } - - pm.Leader, err = pd.getInt32() - if err != nil { - return err - } - - pm.Replicas, err = pd.getInt32Array() - if err != nil { - return err - } - - pm.Isr, err = pd.getInt32Array() - if err != nil { - return err - } - - if version >= 5 { - pm.OfflineReplicas, err = pd.getInt32Array() - if err != nil { - return err - } - } - - return nil -} - -func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(pm.Err)) - pe.putInt32(pm.ID) - pe.putInt32(pm.Leader) - - err = pe.putInt32Array(pm.Replicas) - if err != nil { - return err - } - - err = pe.putInt32Array(pm.Isr) - if err != nil { - return err - } - - if version >= 5 { - err = pe.putInt32Array(pm.OfflineReplicas) - if err != nil { - return err - } - } - - return nil -} - -type TopicMetadata struct { - Err KError - Name string - IsInternal bool // Only valid for Version >= 1 - Partitions []*PartitionMetadata -} - -func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - tm.Err = 
KError(tmp) - - tm.Name, err = pd.getString() - if err != nil { - return err - } - - if version >= 1 { - tm.IsInternal, err = pd.getBool() - if err != nil { - return err - } - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - tm.Partitions = make([]*PartitionMetadata, n) - for i := 0; i < n; i++ { - tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd, version) - if err != nil { - return err - } - } - - return nil -} - -func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { - pe.putInt16(int16(tm.Err)) - - err = pe.putString(tm.Name) - if err != nil { - return err - } - - if version >= 1 { - pe.putBool(tm.IsInternal) - } - - err = pe.putArrayLength(len(tm.Partitions)) - if err != nil { - return err - } - - for _, pm := range tm.Partitions { - err = pm.encode(pe, version) - if err != nil { - return err - } - } - - return nil -} - -type MetadataResponse struct { - Version int16 - ThrottleTimeMs int32 - Brokers []*Broker - ClusterID *string - ControllerID int32 - Topics []*TopicMetadata -} - -func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { - r.Version = version - - if version >= 3 { - r.ThrottleTimeMs, err = pd.getInt32() - if err != nil { - return err - } - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Brokers = make([]*Broker, n) - for i := 0; i < n; i++ { - r.Brokers[i] = new(Broker) - err = r.Brokers[i].decode(pd, version) - if err != nil { - return err - } - } - - if version >= 2 { - r.ClusterID, err = pd.getNullableString() - if err != nil { - return err - } - } - - if version >= 1 { - r.ControllerID, err = pd.getInt32() - if err != nil { - return err - } - } else { - r.ControllerID = -1 - } - - n, err = pd.getArrayLength() - if err != nil { - return err - } - - r.Topics = make([]*TopicMetadata, n) - for i := 0; i < n; i++ { - r.Topics[i] = new(TopicMetadata) - err = r.Topics[i].decode(pd, version) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) encode(pe packetEncoder) error { - if r.Version >= 3 { - pe.putInt32(r.ThrottleTimeMs) - } - - err := pe.putArrayLength(len(r.Brokers)) - if err != nil { - return err - } - for _, broker := range r.Brokers { - err = broker.encode(pe, r.Version) - if err != nil { - return err - } - } - - if r.Version >= 2 { - err := pe.putNullableString(r.ClusterID) - if err != nil { - return err - } - } - - if r.Version >= 1 { - pe.putInt32(r.ControllerID) - } - - err = pe.putArrayLength(len(r.Topics)) - if err != nil { - return err - } - for _, tm := range r.Topics { - err = tm.encode(pe, r.Version) - if err != nil { - return err - } - } - - return nil -} - -func (r *MetadataResponse) key() int16 { - return 3 -} - -func (r *MetadataResponse) version() int16 { - return r.Version -} - -func (r *MetadataResponse) headerVersion() int16 { - return 0 -} - -func (r *MetadataResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_10_0_0 - case 2: - return V0_10_1_0 - case 3, 4: - return V0_11_0_0 - case 5: - return V1_0_0_0 - default: - return MinVersion - } -} - -// testing API - -func (r *MetadataResponse) AddBroker(addr string, id int32) { - r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) -} - -func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { - var tmatch *TopicMetadata - - for _, tm := range r.Topics { - if tm.Name == topic { - tmatch = tm - goto foundTopic - } - } - - tmatch = new(TopicMetadata) - tmatch.Name = topic - 
r.Topics = append(r.Topics, tmatch) - -foundTopic: - - tmatch.Err = err - return tmatch -} - -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { - tmatch := r.AddTopic(topic, ErrNoError) - var pmatch *PartitionMetadata - - for _, pm := range tmatch.Partitions { - if pm.ID == partition { - pmatch = pm - goto foundPartition - } - } - - pmatch = new(PartitionMetadata) - pmatch.ID = partition - tmatch.Partitions = append(tmatch.Partitions, pmatch) - -foundPartition: - - pmatch.Leader = brokerID - pmatch.Replicas = replicas - pmatch.Isr = isr - pmatch.OfflineReplicas = offline - pmatch.Err = err -} diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go deleted file mode 100644 index 80507e14e4..0000000000 --- a/vendor/github.com/Shopify/sarama/zstd.go +++ /dev/null @@ -1,50 +0,0 @@ -package sarama - -import ( - "sync" - - "github.com/klauspost/compress/zstd" -) - -type ZstdEncoderParams struct { - Level int -} -type ZstdDecoderParams struct { -} - -var zstdEncMap, zstdDecMap sync.Map - -func getEncoder(params ZstdEncoderParams) *zstd.Encoder { - if ret, ok := zstdEncMap.Load(params); ok { - return ret.(*zstd.Encoder) - } - // It's possible to race and create multiple new writers. - // Only one will survive GC after use. - encoderLevel := zstd.SpeedDefault - if params.Level != CompressionLevelDefault { - encoderLevel = zstd.EncoderLevelFromZstd(params.Level) - } - zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true), - zstd.WithEncoderLevel(encoderLevel)) - zstdEncMap.Store(params, zstdEnc) - return zstdEnc -} - -func getDecoder(params ZstdDecoderParams) *zstd.Decoder { - if ret, ok := zstdDecMap.Load(params); ok { - return ret.(*zstd.Decoder) - } - // It's possible to race and create multiple new readers. - // Only one will survive GC after use. - zstdDec, _ := zstd.NewReader(nil) - zstdDecMap.Store(params, zstdDec) - return zstdDec -} - -func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) { - return getDecoder(params).DecodeAll(src, dst) -} - -func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) { - return getEncoder(params).EncodeAll(src, dst), nil -} diff --git a/vendor/github.com/coreos/go-oidc/v3/LICENSE b/vendor/github.com/coreos/go-oidc/v3/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/go-oidc/v3/NOTICE b/vendor/github.com/coreos/go-oidc/v3/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
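The files below vendor the go-oidc v3 client sources into this repository. As a brief orientation for reviewers, here is a minimal, illustrative sketch of how the vendored API is typically wired together; it is not part of the patch, and the issuer URL, client ID, and raw token are hypothetical placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()

	// Discover endpoints and signing keys from the issuer's
	// /.well-known/openid-configuration document.
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // hypothetical issuer
	if err != nil {
		log.Fatalf("discovery failed: %v", err)
	}

	// Bind a verifier to the expected audience (the OAuth2 client ID).
	verifier := provider.Verifier(&oidc.Config{ClientID: "my-client-id"}) // hypothetical client ID

	rawIDToken := "eyJ..." // hypothetical token obtained from the token endpoint

	// Verify checks the signature, issuer, audience, and expiry.
	idToken, err := verifier.Verify(ctx, rawIDToken)
	if err != nil {
		log.Fatalf("token verification failed: %v", err)
	}

	// Claims unmarshals the raw payload into a caller-provided struct.
	var claims struct {
		Email string `json:"email"`
	}
	if err := idToken.Claims(&claims); err != nil {
		log.Fatalf("decoding claims failed: %v", err)
	}
	fmt.Println(claims.Email)
}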
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go new file mode 100644 index 0000000000..b7bd09275d --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go @@ -0,0 +1,17 @@ +package oidc + +// JOSE asymmetric signing algorithm values as defined by RFC 7518 +// +// see: https://tools.ietf.org/html/rfc7518#section-3.1 +const ( + RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = "ES256" // ECDSA using P-256 and SHA-256 + ES384 = "ES384" // ECDSA using P-384 and SHA-384 + ES512 = "ES512" // ECDSA using P-521 and SHA-512 + PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 + EdDSA = "EdDSA" // Ed25519 using SHA-512 +) diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go new file mode 100644 index 0000000000..b1e3f7e3ff --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -0,0 +1,250 @@ +package oidc + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + jose "github.com/go-jose/go-jose/v3" +) + +// StaticKeySet is a verifier that validates JWT against a static set of public keys. +type StaticKeySet struct { + // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and + // *ecdsa.PublicKey. + PublicKeys []crypto.PublicKey +} + +// VerifySignature compares the signature against a static set of public keys. +func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, err := jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("parsing jwt: %v", err) + } + for _, pub := range s.PublicKeys { + switch pub.(type) { + case *rsa.PublicKey: + case *ecdsa.PublicKey: + case ed25519.PublicKey: + default: + return nil, fmt.Errorf("invalid public key type provided: %T", pub) + } + payload, err := jws.Verify(pub) + if err != nil { + continue + } + return payload, nil + } + return nil, fmt.Errorf("no public keys able to verify jwt") +} + +// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP +// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically +// used by NewProvider using the URLs returned by OpenID Connect discovery, but is +// exposed for providers that don't support discovery or to prevent round trips to the +// discovery URL. +// +// The returned KeySet is a long lived verifier that caches keys based on any +// keys change. Reuse a common remote key set instead of creating new ones as needed. +func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet { + return newRemoteKeySet(ctx, jwksURL, time.Now) +} + +func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet { + if now == nil { + now = time.Now + } + return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now} +} + +// RemoteKeySet is a KeySet implementation that validates JSON web tokens against +// a jwks_uri endpoint. +type RemoteKeySet struct { + jwksURL string + ctx context.Context + now func() time.Time + + // guard all other fields + mu sync.RWMutex + + // inflight suppresses parallel execution of updateKeys and allows + // multiple goroutines to wait for its result. 
+ inflight *inflight + + // A set of cached keys. + cachedKeys []jose.JSONWebKey +} + +// inflight is used to wait on some in-flight request from multiple goroutines. +type inflight struct { + doneCh chan struct{} + + keys []jose.JSONWebKey + err error +} + +func newInflight() *inflight { + return &inflight{doneCh: make(chan struct{})} +} + +// wait returns a channel that multiple goroutines can receive on. Once it returns +// a value, the inflight request is done and result() can be inspected. +func (i *inflight) wait() <-chan struct{} { + return i.doneCh +} + +// done can only be called by a single goroutine. It records the result of the +// inflight request and signals other goroutines that the result is safe to +// inspect. +func (i *inflight) done(keys []jose.JSONWebKey, err error) { + i.keys = keys + i.err = err + close(i.doneCh) +} + +// result cannot be called until the wait() channel has returned a value. +func (i *inflight) result() ([]jose.JSONWebKey, error) { + return i.keys, i.err +} + +// paresdJWTKey is a context key that allows common setups to avoid parsing the +// JWT twice. It holds a *jose.JSONWebSignature value. +var parsedJWTKey contextKey + +// VerifySignature validates a payload against a signature from the jwks_uri. +// +// Users MUST NOT call this method directly and should use an IDTokenVerifier +// instead. This method skips critical validations such as 'alg' values and is +// only exported to implement the KeySet interface. +func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature) + if !ok { + var err error + jws, err = jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + } + return r.verify(ctx, jws) +} + +func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) { + // We don't support JWTs signed with multiple signatures. + keyID := "" + for _, sig := range jws.Signatures { + keyID = sig.Header.KeyID + break + } + + keys := r.keysFromCache() + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(&key); err == nil { + return payload, nil + } + } + } + + // If the kid doesn't match, check for new keys from the remote. This is the + // strategy recommended by the spec. + // + // https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys + keys, err := r.keysFromRemote(ctx) + if err != nil { + return nil, fmt.Errorf("fetching keys %w", err) + } + + for _, key := range keys { + if keyID == "" || key.KeyID == keyID { + if payload, err := jws.Verify(&key); err == nil { + return payload, nil + } + } + } + return nil, errors.New("failed to verify id token signature") +} + +func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) { + r.mu.RLock() + defer r.mu.RUnlock() + return r.cachedKeys +} + +// keysFromRemote syncs the key set from the remote set, records the values in the +// cache, and returns the key set. +func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) { + // Need to lock to inspect the inflight request field. + r.mu.Lock() + // If there's not a current inflight request, create one. + if r.inflight == nil { + r.inflight = newInflight() + + // This goroutine has exclusive ownership over the current inflight + // request. It releases the resource by nil'ing the inflight field + // once the goroutine is done. + go func() { + // Sync keys and finish inflight when that's done. 
+ keys, err := r.updateKeys() + + r.inflight.done(keys, err) + + // Lock to update the keys and indicate that there is no longer an + // inflight request. + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + r.cachedKeys = keys + } + + // Free inflight so a different request can run. + r.inflight = nil + }() + } + inflight := r.inflight + r.mu.Unlock() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.wait(): + return inflight.result() + } +} + +func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) { + req, err := http.NewRequest("GET", r.jwksURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: can't create request: %v", err) + } + + resp, err := doRequest(r.ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: get keys failed %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body) + } + + var keySet jose.JSONWebKeySet + err = unmarshalResp(resp, body, &keySet) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body) + } + return keySet.Keys, nil +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go new file mode 100644 index 0000000000..b7db3c7342 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -0,0 +1,554 @@ +// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package. +package oidc + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "mime" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" +) + +const ( + // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests. + ScopeOpenID = "openid" + + // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting + // OAuth2 refresh tokens. + // + // Support for this scope differs between OpenID Connect providers. For instance + // Google rejects it, favoring appending "access_type=offline" as part of the + // authorization request instead. + // + // See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess + ScopeOfflineAccess = "offline_access" +) + +var ( + errNoAtHash = errors.New("id token did not have an access token hash") + errInvalidAtHash = errors.New("access token hash does not match value in ID token") +) + +type contextKey int + +var issuerURLKey contextKey + +// ClientContext returns a new Context that carries the provided HTTP client. +// +// This method sets the same context key used by the golang.org/x/oauth2 package, +// so the returned context works for that package too. +// +// myClient := &http.Client{} +// ctx := oidc.ClientContext(parentContext, myClient) +// +// // This will use the custom client +// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") +func ClientContext(ctx context.Context, client *http.Client) context.Context { + return context.WithValue(ctx, oauth2.HTTPClient, client) +} + +func getClient(ctx context.Context) *http.Client { + if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok { + return c + } + return nil +} + +// InsecureIssuerURLContext allows discovery to work when the issuer_url reported +// by upstream is mismatched with the discovery URL. 
This is meant for integration +// with off-spec providers such as Azure. +// +// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0" +// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0" +// +// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL) +// +// // Provider will be discovered with the discoveryBaseURL, but use issuerURL +// // for future issuer validation. +// provider, err := oidc.NewProvider(ctx, discoveryBaseURL) +// +// This is insecure because validating the correct issuer is critical for multi-tenant +// providers. Any overrides here MUST be carefully reviewed. +func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context { + return context.WithValue(ctx, issuerURLKey, issuerURL) +} + +func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { + client := http.DefaultClient + if c := getClient(ctx); c != nil { + client = c + } + return client.Do(req.WithContext(ctx)) +} + +// Provider represents an OpenID Connect server's configuration. +type Provider struct { + issuer string + authURL string + tokenURL string + deviceAuthURL string + userInfoURL string + jwksURL string + algorithms []string + + // Raw claims returned by the server. + rawClaims []byte + + // Guards all of the following fields. + mu sync.Mutex + // HTTP client specified from the initial NewProvider request. This is used + // when creating the common key set. + client *http.Client + // A key set that uses context.Background() and is shared between all code paths + // that don't have a convenient way of supplying a unique context. + commonRemoteKeySet KeySet +} + +func (p *Provider) remoteKeySet() KeySet { + p.mu.Lock() + defer p.mu.Unlock() + if p.commonRemoteKeySet == nil { + ctx := context.Background() + if p.client != nil { + ctx = ClientContext(ctx, p.client) + } + p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL) + } + return p.commonRemoteKeySet +} + +type providerJSON struct { + Issuer string `json:"issuer"` + AuthURL string `json:"authorization_endpoint"` + TokenURL string `json:"token_endpoint"` + DeviceAuthURL string `json:"device_authorization_endpoint"` + JWKSURL string `json:"jwks_uri"` + UserInfoURL string `json:"userinfo_endpoint"` + Algorithms []string `json:"id_token_signing_alg_values_supported"` +} + +// supportedAlgorithms is a list of algorithms explicitly supported by this +// package. If a provider supports other algorithms, such as HS256 or none, +// those values won't be passed to the IDTokenVerifier. +var supportedAlgorithms = map[string]bool{ + RS256: true, + RS384: true, + RS512: true, + ES256: true, + ES384: true, + ES512: true, + PS256: true, + PS384: true, + PS512: true, + EdDSA: true, +} + +// ProviderConfig allows creating providers when discovery isn't supported. It's +// generally easier to use NewProvider directly. +type ProviderConfig struct { + // IssuerURL is the identity of the provider, and the string it uses to sign + // ID tokens with. For example "https://accounts.google.com". This value MUST + // match ID tokens exactly. + IssuerURL string + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string + // DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0 + // device authorization endpoint. 
+ DeviceAuthURL string + // UserInfoURL is the endpoint used by the provider to support the OpenID + // Connect UserInfo flow. + // + // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + UserInfoURL string + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string + + // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign + // ID tokens. If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. + Algorithms []string +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. +func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + deviceAuthURL: p.DeviceAuthURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: p.Algorithms, + client: getClient(ctx), + } +} + +// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. +// +// The issuer is the URL identifier for the service. For example: "https://accounts.google.com" +// or "https://login.salesforce.com". +func NewProvider(ctx context.Context, issuer string) (*Provider, error) { + wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" + req, err := http.NewRequest("GET", wellKnown, nil) + if err != nil { + return nil, err + } + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + var p providerJSON + err = unmarshalResp(resp, body, &p) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err) + } + + issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string) + if !skipIssuerValidation { + issuerURL = issuer + } + if p.Issuer != issuerURL && !skipIssuerValidation { + return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer) + } + var algs []string + for _, a := range p.Algorithms { + if supportedAlgorithms[a] { + algs = append(algs, a) + } + } + return &Provider{ + issuer: issuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + deviceAuthURL: p.DeviceAuthURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: algs, + rawClaims: body, + client: getClient(ctx), + }, nil +} + +// Claims unmarshals raw fields returned by the server during discovery. +// +// var claims struct { +// ScopesSupported []string `json:"scopes_supported"` +// ClaimsSupported []string `json:"claims_supported"` +// } +// +// if err := provider.Claims(&claims); err != nil { +// // handle unmarshaling error +// } +// +// For a list of fields defined by the OpenID Connect spec see: +// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +func (p *Provider) Claims(v interface{}) error { + if p.rawClaims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(p.rawClaims, v) +} + +// Endpoint returns the OAuth2 auth and token endpoints for the given provider. 
+func (p *Provider) Endpoint() oauth2.Endpoint { + return oauth2.Endpoint{AuthURL: p.authURL, DeviceAuthURL: p.deviceAuthURL, TokenURL: p.tokenURL} +} + +// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given +// provider. +func (p *Provider) UserInfoEndpoint() string { + return p.userInfoURL +} + +// UserInfo represents the OpenID Connect userinfo claims. +type UserInfo struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + + claims []byte +} + +type userInfoRaw struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + // Handle providers that return email_verified as a string + // https://forums.aws.amazon.com/thread.jspa?messageID=949441󧳁 and + // https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11 + EmailVerified stringAsBool `json:"email_verified"` +} + +// Claims unmarshals the raw JSON object claims into the provided object. +func (u *UserInfo) Claims(v interface{}) error { + if u.claims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(u.claims, v) +} + +// UserInfo uses the token source to query the provider's user info endpoint. +func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) { + if p.userInfoURL == "" { + return nil, errors.New("oidc: user info endpoint is not supported by this provider") + } + + req, err := http.NewRequest("GET", p.userInfoURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: create GET request: %v", err) + } + + token, err := tokenSource.Token() + if err != nil { + return nil, fmt.Errorf("oidc: get access token: %v", err) + } + token.SetAuthHeader(req) + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + ct := resp.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/jwt" { + payload, err := p.remoteKeySet().VerifySignature(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err) + } + body = payload + } + + var userInfo userInfoRaw + if err := json.Unmarshal(body, &userInfo); err != nil { + return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err) + } + return &UserInfo{ + Subject: userInfo.Subject, + Profile: userInfo.Profile, + Email: userInfo.Email, + EmailVerified: bool(userInfo.EmailVerified), + claims: body, + }, nil +} + +// IDToken is an OpenID Connect extension that provides a predictable representation +// of an authorization event. +// +// The ID Token only holds fields OpenID Connect requires. To access additional +// claims returned by the server, use the Claims method. +type IDToken struct { + // The URL of the server which issued this token. OpenID Connect + // requires this value always be identical to the URL used for + // initial discovery. + // + // Note: Because of a known issue with Google Accounts' implementation + // this value may differ when using Google. + // + // See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo + Issuer string + + // The client ID, or set of client IDs, that this token is issued for. 
For + // common uses, this is the client that initialized the auth flow. + // + // This package ensures the audience contains an expected value. + Audience []string + + // A unique string which identifies the end user. + Subject string + + // Expiry of the token. This package will not process tokens that have + // expired unless that validation is explicitly turned off. + Expiry time.Time + // When the token was issued by the provider. + IssuedAt time.Time + + // Initial nonce provided during the authentication redirect. + // + // This package does NOT provide verification on the value of this field + // and it's the user's responsibility to ensure it contains a valid value. + Nonce string + + // at_hash claim, if set in the ID token. Callers can verify an access token + // that corresponds to the ID token using the VerifyAccessToken method. + AccessTokenHash string + + // signature algorithm used for ID token, needed to compute a verification hash of an + // access token + sigAlgorithm string + + // Raw payload of the id_token. + claims []byte + + // Map of distributed claim names to claim sources + distributedClaims map[string]claimSource +} + +// Claims unmarshals the raw JSON payload of the ID Token into a provided struct. +// +// idToken, err := idTokenVerifier.Verify(rawIDToken) +// if err != nil { +// // handle error +// } +// var claims struct { +// Email string `json:"email"` +// EmailVerified bool `json:"email_verified"` +// } +// if err := idToken.Claims(&claims); err != nil { +// // handle error +// } +func (i *IDToken) Claims(v interface{}) error { + if i.claims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(i.claims, v) +} + +// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token +// matches the hash in the id token. It returns an error if the hashes don't match. +// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token +// before calling this method. 
See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken +func (i *IDToken) VerifyAccessToken(accessToken string) error { + if i.AccessTokenHash == "" { + return errNoAtHash + } + var h hash.Hash + switch i.sigAlgorithm { + case RS256, ES256, PS256: + h = sha256.New() + case RS384, ES384, PS384: + h = sha512.New384() + case RS512, ES512, PS512, EdDSA: + h = sha512.New() + default: + return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm) + } + h.Write([]byte(accessToken)) // hash documents that Write will never return an error + sum := h.Sum(nil)[:h.Size()/2] + actual := base64.RawURLEncoding.EncodeToString(sum) + if actual != i.AccessTokenHash { + return errInvalidAtHash + } + return nil +} + +type idToken struct { + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience audience `json:"aud"` + Expiry jsonTime `json:"exp"` + IssuedAt jsonTime `json:"iat"` + NotBefore *jsonTime `json:"nbf"` + Nonce string `json:"nonce"` + AtHash string `json:"at_hash"` + ClaimNames map[string]string `json:"_claim_names"` + ClaimSources map[string]claimSource `json:"_claim_sources"` +} + +type claimSource struct { + Endpoint string `json:"endpoint"` + AccessToken string `json:"access_token"` +} + +type stringAsBool bool + +func (sb *stringAsBool) UnmarshalJSON(b []byte) error { + switch string(b) { + case "true", `"true"`: + *sb = true + case "false", `"false"`: + *sb = false + default: + return errors.New("invalid value for boolean") + } + return nil +} + +type audience []string + +func (a *audience) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + *a = audience{s} + return nil + } + var auds []string + if err := json.Unmarshal(b, &auds); err != nil { + return err + } + *a = auds + return nil +} + +type jsonTime time.Time + +func (j *jsonTime) UnmarshalJSON(b []byte) error { + var n json.Number + if err := json.Unmarshal(b, &n); err != nil { + return err + } + var unix int64 + + if t, err := n.Int64(); err == nil { + unix = t + } else { + f, err := n.Float64() + if err != nil { + return err + } + unix = int64(f) + } + *j = jsonTime(time.Unix(unix, 0)) + return nil +} + +func unmarshalResp(r *http.Response, body []byte, v interface{}) error { + err := json.Unmarshal(body, &v) + if err == nil { + return nil + } + ct := r.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/json" { + return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err) + } + return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err) +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go new file mode 100644 index 0000000000..0bca49a899 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -0,0 +1,356 @@ +package oidc + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + jose "github.com/go-jose/go-jose/v3" + "golang.org/x/oauth2" +) + +const ( + issuerGoogleAccounts = "https://accounts.google.com" + issuerGoogleAccountsNoScheme = "accounts.google.com" +) + +// TokenExpiredError indicates that Verify failed because the token was expired. This +// error does NOT indicate that the token is not also invalid for other reasons. Other +// checks might have failed if the expiration check had not failed. 
+type TokenExpiredError struct { + // Expiry is the time when the token expired. + Expiry time.Time +} + +func (e *TokenExpiredError) Error() string { + return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry) +} + +// KeySet is a set of publc JSON Web Keys that can be used to validate the signature +// of JSON web tokens. This is expected to be backed by a remote key set through +// provider metadata discovery or an in-memory set of keys delivered out-of-band. +type KeySet interface { + // VerifySignature parses the JSON web token, verifies the signature, and returns + // the raw payload. Header and claim fields are validated by other parts of the + // package. For example, the KeySet does not need to check values such as signature + // algorithm, issuer, and audience since the IDTokenVerifier validates these values + // independently. + // + // If VerifySignature makes HTTP requests to verify the token, it's expected to + // use any HTTP client associated with the context through ClientContext. + VerifySignature(ctx context.Context, jwt string) (payload []byte, err error) +} + +// IDTokenVerifier provides verification for ID Tokens. +type IDTokenVerifier struct { + keySet KeySet + config *Config + issuer string +} + +// NewVerifier returns a verifier manually constructed from a key set and issuer URL. +// +// It's easier to use provider discovery to construct an IDTokenVerifier than creating +// one directly. This method is intended to be used with provider that don't support +// metadata discovery, or avoiding round trips when the key set URL is already known. +// +// This constructor can be used to create a verifier directly using the issuer URL and +// JSON Web Key Set URL without using discovery: +// +// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs") +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) +// +// Or a static key set (e.g. for testing): +// +// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}} +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) +func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier { + return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL} +} + +// Config is the configuration for an IDTokenVerifier. +type Config struct { + // Expected audience of the token. For a majority of the cases this is expected to be + // the ID of the client that initialized the login flow. It may occasionally differ if + // the provider supports the authorizing party (azp) claim. + // + // If not provided, users must explicitly set SkipClientIDCheck. + ClientID string + // If specified, only this set of algorithms may be used to sign the JWT. + // + // If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this + // defaults to the set of algorithms the provider supports. Otherwise this values + // defaults to RS256. + SupportedSigningAlgs []string + + // If true, no ClientID check performed. Must be true if ClientID field is empty. + SkipClientIDCheck bool + // If true, token expiry is not checked. + SkipExpiryCheck bool + + // SkipIssuerCheck is intended for specialized cases where the the caller wishes to + // defer issuer validation. When enabled, callers MUST independently verify the Token's + // Issuer is a known good value. + // + // Mismatched issuers often indicate client mis-configuration. 
If mismatches are + // unexpected, evaluate if the provided issuer URL is incorrect instead of enabling + // this option. + SkipIssuerCheck bool + + // Time function to check Token expiry. Defaults to time.Now + Now func() time.Time + + // InsecureSkipSignatureCheck causes this package to skip JWT signature validation. + // It's intended for special cases where providers (such as Azure), use the "none" + // algorithm. + // + // This option can only be enabled safely when the ID Token is received directly + // from the provider after the token exchange. + // + // This option MUST NOT be used when receiving an ID Token from sources other + // than the token endpoint. + InsecureSkipSignatureCheck bool +} + +// VerifierContext returns an IDTokenVerifier that uses the provider's key set to +// verify JWTs. As opposed to Verifier, the context is used for all requests to +// the upstream JWKs endpoint. +func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier { + return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config) +} + +// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs. +// +// The returned verifier uses a background context for all requests to the upstream +// JWKs endpoint. To control that context, use VerifierContext instead. +func (p *Provider) Verifier(config *Config) *IDTokenVerifier { + return p.newVerifier(p.remoteKeySet(), config) +} + +func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier { + if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 { + // Make a copy so we don't modify the config values. + cp := &Config{} + *cp = *config + cp.SupportedSigningAlgs = p.algorithms + config = cp + } + return NewVerifier(p.issuer, keySet, config) +} + +func parseJWT(p string) ([]byte, error) { + parts := strings.Split(p, ".") + if len(parts) < 2 { + return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts)) + } + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err) + } + return payload, nil +} + +func contains(sli []string, ele string) bool { + for _, s := range sli { + if s == ele { + return true + } + } + return false +} + +// Returns the Claims from the distributed JWT token +func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) { + req, err := http.NewRequest("GET", src.Endpoint, nil) + if err != nil { + return nil, fmt.Errorf("malformed request: %v", err) + } + if src.AccessToken != "" { + req.Header.Set("Authorization", "Bearer "+src.AccessToken) + } + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode) + } + + token, err := verifier.Verify(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("malformed response body: %v", err) + } + + return token.claims, nil +} + +// Verify parses a raw ID Token, verifies it's been signed by the provider, performs +// any additional checks depending on the Config, and returns the payload. +// +// Verify does NOT do nonce validation, which is the callers responsibility. 
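A sketch of wiring the Config above to a verifier without discovery, following the NewVerifier documentation; the Google issuer and JWKS URL come from that example, and the client ID is a placeholder.

package p

import (
	"context"

	"github.com/coreos/go-oidc/v3/oidc"
)

// newVerifier builds an IDTokenVerifier from a known issuer and JWKS URL,
// pinning the expected audience and signing algorithm.
func newVerifier(ctx context.Context) *oidc.IDTokenVerifier {
	keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
	return oidc.NewVerifier("https://accounts.google.com", keySet, &oidc.Config{
		ClientID:             "my-client-id", // placeholder
		SupportedSigningAlgs: []string{oidc.RS256},
	})
}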
+// +// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation +// +// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code")) +// if err != nil { +// // handle error +// } +// +// // Extract the ID Token from oauth2 token. +// rawIDToken, ok := oauth2Token.Extra("id_token").(string) +// if !ok { +// // handle error +// } +// +// token, err := verifier.Verify(ctx, rawIDToken) +func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) { + // Throw out tokens with invalid claims before trying to verify the token. This lets + // us do cheap checks before possibly re-syncing keys. + payload, err := parseJWT(rawIDToken) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + var token idToken + if err := json.Unmarshal(payload, &token); err != nil { + return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err) + } + + distributedClaims := make(map[string]claimSource) + + //step through the token to map claim names to claim sources" + for cn, src := range token.ClaimNames { + if src == "" { + return nil, fmt.Errorf("oidc: failed to obtain source from claim name") + } + s, ok := token.ClaimSources[src] + if !ok { + return nil, fmt.Errorf("oidc: source does not exist") + } + distributedClaims[cn] = s + } + + t := &IDToken{ + Issuer: token.Issuer, + Subject: token.Subject, + Audience: []string(token.Audience), + Expiry: time.Time(token.Expiry), + IssuedAt: time.Time(token.IssuedAt), + Nonce: token.Nonce, + AccessTokenHash: token.AtHash, + claims: payload, + distributedClaims: distributedClaims, + } + + // Check issuer. + if !v.config.SkipIssuerCheck && t.Issuer != v.issuer { + // Google sometimes returns "accounts.google.com" as the issuer claim instead of + // the required "https://accounts.google.com". Detect this case and allow it only + // for Google. + // + // We will not add hooks to let other providers go off spec like this. + if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) { + return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer) + } + } + + // If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty. + // + // This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party). + if !v.config.SkipClientIDCheck { + if v.config.ClientID != "" { + if !contains(t.Audience, v.config.ClientID) { + return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience) + } + } else { + return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set") + } + } + + // If a SkipExpiryCheck is false, make sure token is not expired. + if !v.config.SkipExpiryCheck { + now := time.Now + if v.config.Now != nil { + now = v.config.Now + } + nowTime := now() + + if t.Expiry.Before(nowTime) { + return nil, &TokenExpiredError{Expiry: t.Expiry} + } + + // If nbf claim is provided in token, ensure that it is indeed in the past. + if token.NotBefore != nil { + nbfTime := time.Time(*token.NotBefore) + // Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew. 
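Since the expiry branch above returns *TokenExpiredError, callers can distinguish expiry from other verification failures with errors.As; a small sketch, with the function name and logging chosen for illustration.

package p

import (
	"context"
	"errors"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
)

// verifyLoggingExpiry reports expired tokens separately from other failures.
func verifyLoggingExpiry(ctx context.Context, verifier *oidc.IDTokenVerifier, rawIDToken string) (*oidc.IDToken, error) {
	idToken, err := verifier.Verify(ctx, rawIDToken)
	var expired *oidc.TokenExpiredError
	if errors.As(err, &expired) {
		// The token was otherwise parseable but past its exp claim.
		log.Printf("id token expired at %v; a new login is required", expired.Expiry)
	}
	return idToken, err
}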
+ // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153 + leeway := 5 * time.Minute + + if nowTime.Add(leeway).Before(nbfTime) { + return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime) + } + } + } + + if v.config.InsecureSkipSignatureCheck { + return t, nil + } + + jws, err := jose.ParseSigned(rawIDToken) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } + + switch len(jws.Signatures) { + case 0: + return nil, fmt.Errorf("oidc: id token not signed") + case 1: + default: + return nil, fmt.Errorf("oidc: multiple signatures on id token not supported") + } + + sig := jws.Signatures[0] + supportedSigAlgs := v.config.SupportedSigningAlgs + if len(supportedSigAlgs) == 0 { + supportedSigAlgs = []string{RS256} + } + + if !contains(supportedSigAlgs, sig.Header.Algorithm) { + return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm) + } + + t.sigAlgorithm = sig.Header.Algorithm + + ctx = context.WithValue(ctx, parsedJWTKey, jws) + gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken) + if err != nil { + return nil, fmt.Errorf("failed to verify signature: %v", err) + } + + // Ensure that the payload returned by the square actually matches the payload parsed earlier. + if !bytes.Equal(gotPayload, payload) { + return nil, errors.New("oidc: internal error, payload parsed did not match previous payload") + } + + return t, nil +} + +// Nonce returns an auth code option which requires the ID Token created by the +// OpenID Connect provider to contain the specified nonce. +func Nonce(nonce string) oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("nonce", nonce) +} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 0000000000..e9bb0efe77 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. 
+// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. 
+ // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
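The fork above mirrors encoding/json but always decodes numbers as Number and adds UnmarshalWithKeys, which also reports the top-level object's keys in document order when the target is a map. The sketch below only compiles from within github.com/evanphx/json-patch/v5 because the package is internal; the sample document is illustrative.

package main

import (
	"fmt"

	"github.com/evanphx/json-patch/v5/internal/json" // internal: usable only inside json-patch
)

func main() {
	data := []byte(`{"op":"add","path":"/a","value":1}`)

	var doc map[string]interface{}
	keys, err := json.UnmarshalWithKeys(data, &doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(keys)                // [op path value]
	fmt.Printf("%T\n", doc["value"]) // json.Number, because useNumber is always set
}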
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. 
+func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. 
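The indirect and decodingNull handling below is what produces two familiar behaviors: a JSON null resets an existing pointer to nil, and decoding a value into a nil pointer allocates it. A sketch under the same internal-package caveat; the wrapper type is illustrative.

package main

import (
	"fmt"

	"github.com/evanphx/json-patch/v5/internal/json" // internal: usable only inside json-patch
)

type wrapper struct {
	N *int `json:"n"`
}

func main() {
	seven := 7
	w := wrapper{N: &seven}

	// null zeroes the pointer (decodingNull stops at the settable pointer).
	if err := json.Unmarshal([]byte(`{"n":null}`), &w); err != nil {
		panic(err)
	}
	fmt.Println(w.N == nil) // true

	// A number allocates a fresh *int via indirect's reflect.New path.
	if err := json.Unmarshal([]byte(`{"n":3}`), &w); err != nil {
		panic(err)
	}
	fmt.Println(*w.N) // 3
}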
+// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. 
+ if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. 
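The object decoding above matches struct fields exactly first and then case-insensitively, and converts integer-kinded map keys from the object key strings; a sketch of both, with the types and sample documents chosen for illustration (same internal-package caveat).

package main

import (
	"fmt"

	"github.com/evanphx/json-patch/v5/internal/json" // internal: usable only inside json-patch
)

type op struct {
	Op   string `json:"op"`
	Path string `json:"path"`
}

func main() {
	// "OP" has no exact match, so the case-insensitive fallback fills Op.
	var o op
	if err := json.Unmarshal([]byte(`{"OP":"remove","path":"/a"}`), &o); err != nil {
		panic(err)
	}
	fmt.Println(o.Op, o.Path) // remove /a

	// Integer map keys are parsed from the JSON object key strings.
	var m map[int]string
	if err := json.Unmarshal([]byte(`{"1":"one","2":"two"}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m[1], m[2]) // one two
}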
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' 
|| c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. 
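The fromQuoted branches above implement the ,string struct-tag option: the value arrives as a JSON string, valueQuoted unwraps it, and literalStore re-parses the inner literal. A sketch (same internal-package caveat; the payload type is illustrative).

package main

import (
	"fmt"

	"github.com/evanphx/json-patch/v5/internal/json" // internal: usable only inside json-patch
)

type payload struct {
	Count int  `json:"count,string"`
	Ready bool `json:"ready,string"`
}

func main() {
	var p payload
	if err := json.Unmarshal([]byte(`{"count":"42","ready":"true"}`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Count, p.Ready) // 42 true
}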
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. 
+ rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go new file mode 100644 index 0000000000..a1819b16ac --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -0,0 +1,1473 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML