From 6d4fcde1c0dd154862739c811a6ea4b86f53acf3 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Thu, 16 Dec 2021 13:03:02 +0100 Subject: [PATCH] [release-4.8]: sync with openshift/kubernetes@release-4.8 To bring fixes from https://github.com/openshift/kubernetes/pull/1060 --- go.mod | 68 +- go.sum | 142 +-- .../bits-and-blooms/bitset/.gitignore | 26 + .../bits-and-blooms/bitset/.travis.yml | 37 + .../github.com/bits-and-blooms/bitset/LICENSE | 27 + .../bits-and-blooms/bitset/README.md | 93 ++ .../bitset/azure-pipelines.yml | 39 + .../bits-and-blooms/bitset/bitset.go | 952 +++++++++++++++++ .../github.com/bits-and-blooms/bitset/go.mod | 3 + .../github.com/bits-and-blooms/bitset/go.sum | 0 .../bits-and-blooms/bitset/popcnt.go | 53 + .../bits-and-blooms/bitset/popcnt_19.go | 45 + .../bits-and-blooms/bitset/popcnt_amd64.go | 68 ++ .../bits-and-blooms/bitset/popcnt_amd64.s | 104 ++ .../bits-and-blooms/bitset/popcnt_generic.go | 24 + .../bitset/trailing_zeros_18.go | 14 + .../bitset/trailing_zeros_19.go | 9 + .../golang/protobuf/proto/registry.go | 10 +- .../github.com/golang/protobuf/ptypes/any.go | 14 + .../github.com/golang/protobuf/ptypes/doc.go | 4 + .../golang/protobuf/ptypes/duration.go | 4 + .../golang/protobuf/ptypes/timestamp.go | 9 + .../google/go-cmp/cmp/cmpopts/equate.go | 8 - .../google/go-cmp/cmp/cmpopts/errors_go113.go | 15 + .../go-cmp/cmp/cmpopts/errors_xerrors.go | 18 + .../google/go-cmp/cmp/report_compare.go | 4 +- .../google/go-cmp/cmp/report_slices.go | 25 +- .../runc/libcontainer/cgroups/fs/freezer.go | 20 +- .../libcontainer/cgroups/systemd/common.go | 58 +- .../runc/libcontainer/cgroups/systemd/v1.go | 140 ++- .../runc/libcontainer/cgroups/systemd/v2.go | 30 +- .../runc/libcontainer/configs/cgroup_linux.go | 16 +- .../opencontainers/selinux/go-selinux/doc.go | 4 - .../selinux/go-selinux/label/label_linux.go | 5 +- .../selinux/go-selinux/selinux.go | 10 +- .../selinux/go-selinux/selinux_linux.go | 13 +- .../selinux/go-selinux/selinux_stub.go | 2 
+ ...piserver.openshift.io_apirequestcount.yaml | 1 + ...enshift_01_rolebindingrestriction.crd.yaml | 9 +- .../openshift/api/build/v1/generated.proto | 2 + .../openshift/api/build/v1/types.go | 2 + .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../v1/001-cloudprivateipconfig.crd.yaml | 2 + ...rsion-operator_01_clusteroperator.crd.yaml | 39 +- ...ersion-operator_01_clusterversion.crd.yaml | 49 +- ...03_config-operator_01_operatorhub.crd.yaml | 15 +- .../0000_03_config-operator_01_proxy.crd.yaml | 13 +- ...0_10_config-operator_01_apiserver.crd.yaml | 25 +- ...config-operator_01_authentication.crd.yaml | 13 +- .../0000_10_config-operator_01_build.crd.yaml | 23 +- ...000_10_config-operator_01_console.crd.yaml | 13 +- .../0000_10_config-operator_01_dns.crd.yaml | 15 +- ...10_config-operator_01_featuregate.crd.yaml | 13 +- .../0000_10_config-operator_01_image.crd.yaml | 17 +- ...config-operator_01_infrastructure.crd.yaml | 11 +- ...000_10_config-operator_01_ingress.crd.yaml | 15 +- ...000_10_config-operator_01_network.crd.yaml | 13 +- .../0000_10_config-operator_01_oauth.crd.yaml | 13 +- ...000_10_config-operator_01_project.crd.yaml | 13 +- ...0_10_config-operator_01_scheduler.crd.yaml | 17 +- .../api/config/v1/types_apiserver.go | 6 +- .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../v1/0000_10_consoleclidownload.crd.yaml | 29 +- .../0000_10_consoleexternalloglink.crd.yaml | 35 +- .../console/v1/0000_10_consolelink.crd.yaml | 37 +- .../v1/0000_10_consolenotification.crd.yaml | 33 +- .../v1/0000_10_consolequickstart.crd.yaml | 13 +- .../v1/0000_10_consoleyamlsample.crd.yaml | 13 +- .../v1alpha1/0000_10_consoleplugin.crd.yaml | 18 +- .../0000_10-helm-chart-repository.crd.yaml | 13 +- .../api/imageregistry/v1/00-crd.yaml | 25 +- .../api/imageregistry/v1/01-crd.yaml | 25 +- vendor/github.com/openshift/api/install.go | 2 + .../network/v1/001-clusternetwork-crd.yaml | 24 +- .../api/network/v1/002-hostsubnet-crd.yaml | 32 +- 
.../api/network/v1/003-netnamespace-crd.yaml | 22 +- .../v1/004-egressnetworkpolicy-crd.yaml | 10 +- .../v1/001-egressrouter.crd.yaml | 4 +- .../v1/001-egressrouter.crd.yaml-patch | 5 - ...0000_10_config-operator_01_config.crd.yaml | 17 +- .../0000_12_etcd-operator_01_config.crd.yaml | 17 +- ...kube-apiserver-operator_01_config.crd.yaml | 1 + ...roller-manager-operator_01_config.crd.yaml | 10 + ...kube-scheduler-operator_01_config.crd.yaml | 1 + ...hift-apiserver-operator_01_config.crd.yaml | 17 +- ...oud-credential-operator_00_config.crd.yaml | 13 +- ...rsion-migrator-operator_00_config.crd.yaml | 13 +- ...authentication-operator_01_config.crd.yaml | 13 +- ...roller-manager-operator_02_config.crd.yaml | 17 +- ...00_50_cluster_storage_operator_01_crd.yaml | 11 +- ...ess-operator_00-ingresscontroller.crd.yaml | 1 + .../0000_50_service-ca-operator_02_crd.yaml | 13 +- ...00_70_cluster-network-operator_01_crd.yaml | 13 +- .../v1/0000_70_console-operator.crd.yaml | 13 +- ...perator_00-custom-resource-definition.yaml | 10 +- ...i_snapshot_controller_operator_01_crd.yaml | 11 +- ...0_90_cluster_csi_driver_01_config.crd.yaml | 1 + .../openshift/api/operator/v1/types_dns.go | 11 +- .../v1/types_kubecontrollermanager.go | 8 + .../api/operator/v1/types_network.go | 2 +- .../v1/zz_generated.swagger_doc_generated.go | 10 +- ...rator_01_imagecontentsourcepolicy.crd.yaml | 17 +- ...10-pod-network-connectivity-check.crd.yaml | 9 +- ...openshift_01_clusterresourcequota.crd.yaml | 1 + .../samples/v1/0000_10_samplesconfig.crd.yaml | 29 +- ...0000_03_security-openshift_01_scc.crd.yaml | 53 +- .../pkg/admission/imagepolicy/accept.go | 17 +- .../apis/imagepolicy/v1/defaults.go | 1 + .../pkg/admission/imagepolicy/imagepolicy.go | 13 +- .../sccadmission/admission.go | 59 +- .../sccmatching/matcher.go | 78 +- .../sccmatching/provider.go | 21 +- .../securitycontextconstraints/util/util.go | 65 ++ .../pkg/controller/factory/eventfilters.go | 26 + .../resource/resourceapply/generic.go | 25 +- 
.../resource/resourceapply/migration.go | 46 + .../operator/resource/resourceapply/policy.go | 47 + .../resource/resourceread/migration.go | 26 + .../operator/resource/resourceread/policy.go | 25 + vendor/github.com/sirupsen/logrus/.travis.yml | 14 +- .../github.com/sirupsen/logrus/CHANGELOG.md | 36 + vendor/github.com/sirupsen/logrus/README.md | 2 +- vendor/github.com/sirupsen/logrus/entry.go | 73 +- vendor/github.com/sirupsen/logrus/go.sum | 2 - .../sirupsen/logrus/json_formatter.go | 5 +- vendor/github.com/sirupsen/logrus/logger.go | 2 +- .../sirupsen/logrus/terminal_check_unix.go | 2 +- .../sirupsen/logrus/text_formatter.go | 7 +- .../encoding/simplifiedchinese/hzgb2312.go | 2 +- .../x/text/internal/language/language.go | 90 +- .../x/text/internal/language/parse.go | 37 +- vendor/golang.org/x/text/language/go1_1.go | 1 + vendor/golang.org/x/text/language/go1_2.go | 1 + vendor/golang.org/x/text/language/language.go | 4 + vendor/golang.org/x/text/language/tables.go | 8 +- .../x/text/secure/bidirule/bidirule10.0.0.go | 1 + .../x/text/secure/bidirule/bidirule9.0.0.go | 1 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 221 +++- vendor/golang.org/x/text/unicode/bidi/core.go | 63 +- .../x/text/unicode/bidi/tables10.0.0.go | 1 + .../x/text/unicode/bidi/tables11.0.0.go | 1 + .../x/text/unicode/bidi/tables12.0.0.go | 1 + .../x/text/unicode/bidi/tables13.0.0.go | 1 + .../x/text/unicode/bidi/tables9.0.0.go | 1 + .../x/text/unicode/norm/tables10.0.0.go | 1 + .../x/text/unicode/norm/tables11.0.0.go | 1 + .../x/text/unicode/norm/tables12.0.0.go | 1 + .../x/text/unicode/norm/tables13.0.0.go | 1 + .../x/text/unicode/norm/tables9.0.0.go | 1 + .../golang.org/x/text/width/tables10.0.0.go | 1 + .../golang.org/x/text/width/tables11.0.0.go | 1 + .../golang.org/x/text/width/tables12.0.0.go | 1 + .../golang.org/x/text/width/tables13.0.0.go | 1 + vendor/golang.org/x/text/width/tables9.0.0.go | 1 + .../protobuf/encoding/protojson/decode.go | 42 +- 
.../protobuf/encoding/protojson/encode.go | 228 ++-- .../encoding/protojson/well_known_types.go | 42 +- .../protobuf/encoding/prototext/decode.go | 30 +- .../protobuf/encoding/prototext/encode.go | 84 +- .../protobuf/internal/descfmt/stringer.go | 2 + .../protobuf/internal/detrand/rand.go | 8 + .../internal/encoding/json/decode_token.go | 5 +- .../encoding/messageset/messageset.go | 33 +- .../protobuf/internal/encoding/tag/tag.go | 2 +- .../protobuf/internal/encoding/text/encode.go | 8 +- .../protobuf/internal/fieldsort/fieldsort.go | 40 - .../protobuf/internal/filedesc/build.go | 3 + .../protobuf/internal/filedesc/desc.go | 77 +- .../protobuf/internal/filedesc/desc_lazy.go | 4 +- .../protobuf/internal/filedesc/desc_list.go | 172 +++- .../internal/filedesc/desc_list_gen.go | 11 + .../protobuf/internal/impl/api_export.go | 2 +- .../protobuf/internal/impl/codec_field.go | 18 +- .../protobuf/internal/impl/codec_gen.go | 974 +++++++++--------- .../protobuf/internal/impl/codec_map.go | 19 +- .../protobuf/internal/impl/codec_message.go | 68 +- .../internal/impl/codec_messageset.go | 21 +- .../protobuf/internal/impl/codec_reflect.go | 8 +- .../protobuf/internal/impl/convert.go | 29 + .../protobuf/internal/impl/decode.go | 16 +- .../protobuf/internal/impl/encode.go | 10 +- .../protobuf/internal/impl/legacy_export.go | 2 +- .../internal/impl/legacy_extension.go | 3 +- .../protobuf/internal/impl/legacy_message.go | 122 ++- .../protobuf/internal/impl/merge.go | 6 +- .../protobuf/internal/impl/message.go | 69 +- .../protobuf/internal/impl/message_reflect.go | 125 ++- .../internal/impl/message_reflect_field.go | 85 +- .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/mapsort/mapsort.go | 43 - .../protobuf/internal/order/order.go | 89 ++ .../protobuf/internal/order/range.go | 115 +++ .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 18 +- .../protobuf/proto/decode_gen.go | 128 
+-- .../protobuf/proto/encode.go | 55 +- .../google.golang.org/protobuf/proto/equal.go | 25 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 9 + .../protobuf/reflect/protodesc/desc.go | 276 +++++ .../protobuf/reflect/protodesc/desc_init.go | 248 +++++ .../reflect/protodesc/desc_resolve.go | 286 +++++ .../reflect/protodesc/desc_validate.go | 374 +++++++ .../protobuf/reflect/protodesc/proto.go | 252 +++++ .../protobuf/reflect/protoreflect/source.go | 84 +- .../reflect/protoreflect/source_gen.go | 461 +++++++++ .../protobuf/reflect/protoreflect/type.go | 34 + .../reflect/protoregistry/registry.go | 157 ++- .../types/descriptorpb/descriptor.pb.go | 19 +- .../protobuf/types/known/anypb/any.pb.go | 22 +- .../types/known/durationpb/duration.pb.go | 20 +- .../types/known/fieldmaskpb/field_mask.pb.go | 23 +- .../types/known/timestamppb/timestamp.pb.go | 29 +- .../types/known/wrapperspb/wrappers.pb.go | 19 +- .../protobuf/types/pluginpb/plugin.pb.go | 63 +- .../pkg/apis/apiextensions/v1/conversion.go | 19 +- .../pkg/apis/apiextensions/v1/marshal.go | 3 +- .../apis/apiextensions/v1beta1/conversion.go | 17 +- .../pkg/apis/apiextensions/v1beta1/marshal.go | 3 +- .../third_party/forked/golang/LICENSE | 27 + .../third_party/forked/golang/PATENTS | 22 + .../handlers/fieldmanager/admission.go | 5 +- .../fieldmanager/lastappliedupdater.go | 20 +- .../pkg/endpoints/metrics/metrics.go | 17 +- .../generic/registry/decorated_watcher.go | 22 +- .../pkg/registry/generic/registry/store.go | 4 +- vendor/k8s.io/apiserver/pkg/server/config.go | 5 + .../pkg/server/deprecated_insecure_serving.go | 3 + .../server/egressselector/egress_selector.go | 23 +- .../apiserver/pkg/server/secure_serving.go | 7 +- .../apiserver/pkg/storage/etcd3/store.go | 30 + .../pkg/util/flowcontrol/apf_controller.go | 49 +- .../third_party/forked/golang/LICENSE | 27 + .../third_party/forked/golang/PATENTS | 22 + .../k8s.io/client-go/tools/cache/reflector.go | 4 +- 
.../tools/clientcmd/api/v1/conversion.go | 2 +- vendor/k8s.io/cloud-provider/go.mod | 17 +- vendor/k8s.io/cloud-provider/go.sum | 48 +- .../apimachinery/lease/controller.go | 2 +- vendor/k8s.io/csi-translation-lib/go.mod | 6 +- vendor/k8s.io/csi-translation-lib/go.sum | 73 +- vendor/k8s.io/klog/v2/klog.go | 27 +- .../pkg/apiserver/apiserver.go | 1 + .../pkg/apiserver/handler_proxy.go | 40 +- .../status/available_controller.go | 22 + .../pkg/polymorphichelpers/logsforobject.go | 33 +- .../kubernetes/cmd/kube-proxy/app/server.go | 37 + .../cmd/kube-proxy/app/server_others.go | 33 - .../cmd/kube-proxy/app/server_windows.go | 31 +- .../cmd/kubelet/app/server_linux.go | 2 + .../managementcpusoverride/admission.go | 14 +- .../cr_validation_registration.go | 5 + .../validate_kubecontrollermanager.go | 113 ++ .../apiaccess_count_controller.go | 62 +- .../deprecatedapirequest/request_counts.go | 2 +- .../deprecatedapirequest/update_func.go | 6 +- .../deprecatedapirequest/v1helpers/helpers.go | 51 +- .../pkg/apis/core/validation/events.go | 4 +- .../pkg/apis/extensions/v1beta1/conversion.go | 4 +- .../pkg/apis/networking/v1beta1/conversion.go | 4 +- .../volume/scheduling/metrics/metrics.go | 2 +- .../pkg/kubelet/cm/cgroup_manager_linux.go | 3 +- .../cm/internal_container_lifecycle.go | 5 +- .../topologymanager/fake_topology_manager.go | 5 +- .../pkg/kubelet/cm/topologymanager/scope.go | 28 +- .../cm/topologymanager/scope_container.go | 3 +- .../kubelet/cm/topologymanager/scope_pod.go | 3 +- .../cm/topologymanager/topology_manager.go | 6 +- .../kubernetes/pkg/kubelet/kubelet_volumes.go | 122 ++- .../pkg/kubelet/logs/container_log_manager.go | 7 +- .../kubernetes/pkg/kubelet/server/server.go | 4 + .../util/manager/watch_based_manager.go | 11 +- .../cache/actual_state_of_world.go | 5 + .../kubernetes/pkg/proxy/iptables/proxier.go | 3 + .../k8s.io/kubernetes/pkg/proxy/topology.go | 6 +- .../kubernetes/pkg/proxy/winkernel/proxier.go | 142 ++- 
.../pkg/registry/core/service/storage/rest.go | 11 +- .../pkg/registry/core/service/strategy.go | 63 ++ .../plugins/noderesources/most_allocated.go | 4 +- .../kubernetes/pkg/scheduler/util/utils.go | 18 +- .../kubernetes/pkg/util/removeall/OWNERS | 8 + .../pkg/util/removeall/removeall.go | 36 +- .../kubernetes/pkg/volume/csi/csi_mounter.go | 3 +- .../pkg/volume/util/hostutil/fake_hostutil.go | 2 +- .../volume/util/hostutil/hostutil_linux.go | 13 +- .../pkg/volume/util/subpath/subpath_linux.go | 3 +- .../e2e/apimachinery/garbage_collector.go | 2 +- .../kubernetes/test/e2e/auth/node_authn.go | 18 +- .../test/e2e/framework/framework.go | 4 +- .../e2e/framework/metrics/kubelet_metrics.go | 2 +- .../e2e/framework/metrics/metrics_grabber.go | 186 +++- .../kubernetes/test/e2e/framework/pod/dial.go | 215 ++++ .../test/e2e/framework/service/jig.go | 4 + .../kubernetes/test/e2e/generated/bindata.go | 395 ++++--- .../monitoring/metrics_grabber.go | 45 +- .../test/e2e/network/networking_perf.go | 5 +- .../kubernetes/test/e2e/network/service.go | 18 +- .../test/e2e/scheduling/priorities.go | 32 +- .../drivers/csi-test/mock/service/service.go | 4 +- .../test/e2e/storage/drivers/csi.go | 77 +- .../test/e2e/storage/drivers/in_tree.go | 2 + .../test/e2e/storage/external/external.go | 8 + .../storage/framework/snapshot_resource.go | 14 +- .../test/e2e/storage/framework/testdriver.go | 6 +- .../e2e/storage/persistent_volumes-local.go | 10 +- .../test/e2e/storage/regional_pd.go | 22 +- .../test/e2e/storage/testsuites/base.go | 19 +- .../e2e/storage/testsuites/multivolume.go | 2 +- .../e2e/storage/testsuites/provisioning.go | 10 +- .../e2e/storage/testsuites/snapshottable.go | 8 +- .../test/e2e/storage/testsuites/subpath.go | 7 +- .../test/e2e/storage/testsuites/topology.go | 2 +- .../e2e/storage/testsuites/volume_expand.go | 6 +- .../test/e2e/storage/testsuites/volume_io.go | 2 +- .../e2e/storage/testsuites/volume_stress.go | 2 +- .../test/e2e/storage/testsuites/volumemode.go | 2 +- 
.../test/e2e/storage/testsuites/volumes.go | 2 +- .../e2e/storage/ubernetes_lite_volumes.go | 150 --- .../test/e2e/storage/volume_metrics.go | 2 +- .../test/e2e/storage/volume_provisioning.go | 1 + vendor/k8s.io/kubernetes/test/e2e/suites.go | 11 +- .../test/e2e/upgrades/upgrade_suite.go | 5 +- .../kubernetes/test/utils/image/manifest.go | 79 +- .../third_party/forked/golang/LICENSE | 2 +- .../k8s.io/legacy-cloud-providers/aws/aws.go | 58 +- .../aws/aws_loadbalancer.go | 183 ++-- .../legacy-cloud-providers/azure/azure.go | 25 + .../azure/azure_controller_common.go | 3 + .../azure/azure_instances.go | 19 +- .../azure/azure_loadbalancer.go | 85 +- .../azure/azure_managedDiskController.go | 5 + .../azure/azure_routes.go | 48 +- .../azure/azure_standard.go | 52 +- .../azure/azure_utils.go | 25 + .../azure/azure_vmsets.go | 5 +- .../azure/azure_vmss.go | 133 ++- .../azure/azure_vmss_cache.go | 39 +- .../azure/retry/azure_error.go | 2 +- .../vsphere/shared_datastore.go | 4 +- .../legacy-cloud-providers/vsphere/vsphere.go | 9 +- .../vsphere/vsphere_util.go | 2 +- vendor/k8s.io/mount-utils/fake_mounter.go | 4 + vendor/k8s.io/mount-utils/go.mod | 2 +- vendor/k8s.io/mount-utils/go.sum | 4 +- vendor/k8s.io/mount-utils/mount.go | 2 + vendor/k8s.io/mount-utils/mount_linux.go | 35 +- .../k8s.io/mount-utils/mount_unsupported.go | 5 + vendor/k8s.io/mount-utils/mount_windows.go | 6 + vendor/modules.txt | 138 +-- .../konnectivity-client/pkg/client/client.go | 50 +- .../kube-storage-version-migrator/LICENSE | 202 ++++ .../pkg/apis/migration/v1alpha1/doc.go | 20 + .../pkg/apis/migration/v1alpha1/register.go | 54 + .../pkg/apis/migration/v1alpha1/types.go | 187 ++++ .../v1alpha1/zz_generated.deepcopy.go | 275 +++++ .../pkg/clients/clientset/clientset.go | 97 ++ .../pkg/clients/clientset/doc.go | 20 + .../pkg/clients/clientset/scheme/doc.go | 20 + .../pkg/clients/clientset/scheme/register.go | 56 + .../clientset/typed/migration/v1alpha1/doc.go | 20 + 
.../migration/v1alpha1/generated_expansion.go | 23 + .../migration/v1alpha1/migration_client.go | 94 ++ .../typed/migration/v1alpha1/storagestate.go | 184 ++++ .../v1alpha1/storageversionmigration.go | 184 ++++ .../v4/typed/reconcile_schema.go | 71 +- .../structured-merge-diff/v4/typed/remove.go | 37 +- 367 files changed, 11279 insertions(+), 3465 deletions(-) create mode 100644 vendor/github.com/bits-and-blooms/bitset/.gitignore create mode 100644 vendor/github.com/bits-and-blooms/bitset/.travis.yml create mode 100644 vendor/github.com/bits-and-blooms/bitset/LICENSE create mode 100644 vendor/github.com/bits-and-blooms/bitset/README.md create mode 100644 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml create mode 100644 vendor/github.com/bits-and-blooms/bitset/bitset.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.mod create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.sum create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/PATENTS create mode 100644 vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/removeall/OWNERS create mode 100644 vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go create mode 100644 
vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go diff --git a/go.mod b/go.mod index 54e97972e1b1..667ff2e1f05e 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-ozzo/ozzo-validation v3.5.0+incompatible // indirect - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.4 + github.com/golang/protobuf v1.5.0 + github.com/google/go-cmp v0.5.5 github.com/google/uuid v1.1.2 github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 // indirect github.com/lestrrat-go/jspointer v0.0.0-20181205001929-82fadba7561c // indirect @@ -31,11 +31,11 @@ require ( github.com/onsi/ginkgo v4.7.0-origin.0+incompatible github.com/onsi/gomega v1.7.0 
github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20210521075222-e273a339932a - github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf + github.com/openshift/api v0.0.0-20210910062324-a41d3573a3ba + github.com/openshift/apiserver-library-go v0.0.0-20211116020226-339bb71f9a26 github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 - github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca + github.com/openshift/library-go v0.0.0-20211109160828-8c48fafbad15 github.com/pborman/uuid v1.2.0 github.com/pquerna/cachecontrol v0.0.0-20201205024021-ac21108117ac // indirect github.com/prometheus/client_golang v1.7.1 @@ -63,7 +63,7 @@ require ( k8s.io/client-go v0.21.1 k8s.io/component-base v0.21.1 k8s.io/component-helpers v0.0.0 - k8s.io/klog/v2 v2.8.0 + k8s.io/klog/v2 v2.9.0 k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 k8s.io/kubectl v0.21.1 k8s.io/kubelet v0.21.1 @@ -75,32 +75,32 @@ require ( replace ( github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible - github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210629175304-1622f8729964 - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210629175304-1622f8729964 - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210629175304-1622f8729964 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210629175304-1622f8729964 - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210629175304-1622f8729964 - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go 
v0.0.0-20210629175304-1622f8729964 - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210629175304-1622f8729964 - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210629175304-1622f8729964 - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210629175304-1622f8729964 - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210629175304-1622f8729964 - k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210629175304-1622f8729964 - k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210629175304-1622f8729964 - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210629175304-1622f8729964 - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210629175304-1622f8729964 - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210629175304-1622f8729964 - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210629175304-1622f8729964 - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210629175304-1622f8729964 - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210629175304-1622f8729964 - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210629175304-1622f8729964 - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210629175304-1622f8729964 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.2-0.20210629175304-1622f8729964 - k8s.io/legacy-cloud-providers => 
github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210629175304-1622f8729964 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210629175304-1622f8729964 - k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210629175304-1622f8729964 - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210629175304-1622f8729964 - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210629175304-1622f8729964 - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210629175304-1622f8729964 + github.com/opencontainers/runc => github.com/kolyshkin/runc v1.0.0-rc95.0.20211216181318-624f590e289a + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20211215000257-b4b48133acd2 + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20211215000257-b4b48133acd2 + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211215000257-b4b48133acd2 + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211215000257-b4b48133acd2 + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20211215000257-b4b48133acd2 + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211215000257-b4b48133acd2 + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20211215000257-b4b48133acd2 + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20211215000257-b4b48133acd2 + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20211215000257-b4b48133acd2 + k8s.io/component-base => 
github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211215000257-b4b48133acd2 + k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211215000257-b4b48133acd2 + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20211215000257-b4b48133acd2 + k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20211215000257-b4b48133acd2 + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20211215000257-b4b48133acd2 + k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.3-0.20211215000257-b4b48133acd2 + k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20211215000257-b4b48133acd2 + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20211215000257-b4b48133acd2 + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20211215000257-b4b48133acd2 + k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20211215000257-b4b48133acd2 + 
k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20211215000257-b4b48133acd2 + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20211215000257-b4b48133acd2 ) diff --git a/go.sum b/go.sum index d5abacc64232..db1d20eca175 100644 --- a/go.sum +++ b/go.sum @@ -109,6 +109,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -358,8 +360,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golangplus/testing 
v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= @@ -378,8 +381,9 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -490,6 +494,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kolyshkin/runc v1.0.0-rc95.0.20211216181318-624f590e289a h1:kPmi+cHfzRxHM+fos8bPGIZ3vUSAW2DFXmrXqoFABog= +github.com/kolyshkin/runc v1.0.0-rc95.0.20211216181318-624f590e289a/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -625,69 +631,70 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/openshift/api v0.0.0-20210521075222-e273a339932a h1:aBPwLqCg66SbQd+HrjB1GhgTfPtqSY4aeB022tEYmE0= +github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/openshift/api v0.0.0-20210521075222-e273a339932a/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= -github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf h1:b1YLQ5SAbjb/GmWpfmS5z6bVTWCJF+ywpd673LqbScc= -github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf/go.mod h1:lhfpWyUaEs2xLx+eTgz012fNzRKiG7XYJ5QcQAgtyRQ= +github.com/openshift/api v0.0.0-20210910062324-a41d3573a3ba h1:aimcu15biJzq4gp0LPsa4cOucfiILIgy/PimUO9ncpc= +github.com/openshift/api v0.0.0-20210910062324-a41d3573a3ba/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= +github.com/openshift/apiserver-library-go v0.0.0-20211116020226-339bb71f9a26 h1:FcIfwy4r1Pog+cgpqhtq2hrbfZ0yDduSdABT9nJYKu0= +github.com/openshift/apiserver-library-go 
v0.0.0-20211116020226-339bb71f9a26/go.mod h1:hmRcqTWiLRXXEnVLhCNoZBfmciZD2N2NrHTEzcRqhK8= github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e h1:F7rBobgSjtYL3/zsgDUjlTVx3Z06hdgpoldpDcn7jzc= github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= -github.com/openshift/kubernetes v1.21.2-0.20210629175304-1622f8729964 h1:OCW8wWNq1u21dEvaymT8MJvKosEJ0ep9kIW7tlYMdHs= -github.com/openshift/kubernetes v1.21.2-0.20210629175304-1622f8729964/go.mod h1:OlE3ItKYtdYc50NKwzy5KWCY1jiOp1AQE0XHSjfjc2Q= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210629175304-1622f8729964 h1:cEQucvuA3XgIWFUIheyLirv2X1hJNM1eATOaAS7qrWE= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210629175304-1622f8729964/go.mod h1:5odM9qXnvk/dO0K8eHCRb9MBNpN/SygEJM9L8AAOk6E= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210629175304-1622f8729964 h1:iKmve9c2IK/dGRrzZ+GmpDj9KZJ7ddHXFZBq0FEoH04= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210629175304-1622f8729964/go.mod h1:rvbhvlOSIrlenGwA4C2EIgx1C956C/cAX7aHYjImXwQ= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210629175304-1622f8729964 h1:Wsf7xc7XNaFedzNBnhLyDEDkyXuuREImCaDHPRHgDIw= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210629175304-1622f8729964/go.mod h1:+9NIFJkht9qnam6CoZabYqGs1X6zR24wZHCUhYGa8XM= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210629175304-1622f8729964 h1:RKoFrhS/c50IgAnN+saWwXno/iEXb9rqWAEOzeEz4Q8= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210629175304-1622f8729964/go.mod 
h1:Tev7I/2hNILK5romDDB88XUaQ0ivvI9tJjVzGhZa1zs= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210629175304-1622f8729964 h1:PDrKVJskKKC+sU1LC407vy8KwXoAm+GSqSuvBe8E1ro= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210629175304-1622f8729964/go.mod h1:aHONBjzllnRfpTuDB+bio/W+lJKheHQ6qcxAVHBzHFg= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210629175304-1622f8729964 h1:yn8fm4xkrcTZB6E28hGajEhJgFdt03Bs7801U5Oorg4= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210629175304-1622f8729964/go.mod h1:s4qyeR7ffm9Vm04y1Cmal04ZY9FzdOamjk/lMq4gLeE= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210629175304-1622f8729964 h1:lom6moGIfnoZr6qfeB6YsLdxUl63A1LDuKvn6BbAqHE= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210629175304-1622f8729964/go.mod h1:i/aIDGDZBEun0WLdRT0BA5tUhYTeNQdwuYNYl7xK/rE= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210629175304-1622f8729964 h1:h6iKGDd1VOrcHAksUzetGKn5k2l7Eyi/V1WjR4aM8A4= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210629175304-1622f8729964/go.mod h1:60txAiFf+HzsdIr3xT3hvqn+hwKz0cU3hEkeuLmKLQM= -github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210629175304-1622f8729964/go.mod h1:+2XbIeiOA7U4ptBBhf8CzvC/VllM/i8fj+f/Q6au5vA= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210629175304-1622f8729964 h1:loYl/mgbI0cff3uuKuz+Nk7eK69DH3vmdbKYGP35Noo= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210629175304-1622f8729964/go.mod h1:ZsYj+LAOMewPhu0swN6I2O4cmmN2GkqudDNMXv4mj60= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210629175304-1622f8729964 h1:QGMKEOS87xPFPgWPKMk6Q7OV4iYYe/FtzPMsb+258HE= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210629175304-1622f8729964/go.mod 
h1:If3eXdFPvI+A7qLvMR+D4a6/5ft6VgbCs+rx51QobqM= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210629175304-1622f8729964/go.mod h1:Vj9fBIaxvmK1dm7wIjIZw0B6PbNUA7yyTyf2/lwt2m4= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210629175304-1622f8729964 h1:Z07DrA5AxyyBJASlC1Qn9NGP5/NVQUIuRm5ZP3urkbU= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210629175304-1622f8729964/go.mod h1:8a9+wxOscdSWUhL3k9ZL59Q/DmUJ0wlOAMalnNTNDSs= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210629175304-1622f8729964 h1:KVqaH0MwuzqQxWVgEL6HI366HaUS4LlkdAExK2D85Kw= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210629175304-1622f8729964/go.mod h1:juK8ItOGGiCVV3KZh7/Jm/+QJHkzpLEmPg15ovsC5nw= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210629175304-1622f8729964 h1:WqpKRuhMS1nOKCQHjVALIvq+RMX/+Wu1vMHxT7hpyMw= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210629175304-1622f8729964/go.mod h1:NadQFap7606zvRwby6hYFiBcXjk00Z3A8eQlZW3amIo= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210629175304-1622f8729964/go.mod h1:xONYhboWRO+jBbcHH8QFU3s19PyEFF/RfsvhtKHa3eI= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210629175304-1622f8729964 h1:VU3UpQQxFBZ0sNVCAviGhd8QgMzyKauyIi+Iu6tTmwc= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210629175304-1622f8729964/go.mod h1:Orfp0jnRIGgoOmRs40Bz9NEGiK0r4FdL3RrnlyAC65o= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210629175304-1622f8729964 h1:xT9z6AaZG2z2uAJmHAjIyuZL7jI/+4fkOGG29B3Ugx8= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210629175304-1622f8729964/go.mod h1:EBlODbbqjii99HCfAGTHvptjj3CwlzO9XXf7W1p4zNg= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210629175304-1622f8729964 
h1:lBH72cGnruY+mM8d3laE65TbrloRU4jnWV5vnoKg9x8= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210629175304-1622f8729964/go.mod h1:ERv6pgjGhWQ3qBUoAxsPujP3PDtgQC46aw/52A5bVE0= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210629175304-1622f8729964 h1:7JzAYTzR0CP+kbxuav1Fm4s8SzzkPvYUJ6AJRd1qLMc= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210629175304-1622f8729964/go.mod h1:ubZB7tOrDLJIcsCvtA15lyNAjLPMml0LUeMm1WVNi80= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210629175304-1622f8729964 h1:cg2ytg49jibL/Zfk9q3YZ8oNj/Sk9/p7mEOIXUrr2lU= -github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210629175304-1622f8729964/go.mod h1:dX9lYn+OmdwOIh2tAuuGc0m2K0vZwFynmcsvDwdOkrE= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210629175304-1622f8729964 h1:lIG8x5jaoCTwnrIN3Cjbzs4UDOJth7MkLfb7WAPQiCE= -github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210629175304-1622f8729964/go.mod h1:R3N6WaOJSt9L2VSYsKFYaz6keliSzDbCbw03STRdEQI= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210629175304-1622f8729964 h1:ScdUT64/iHr/iiBEET0fK0WlX+lAH2lvCErmnTL5OdI= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210629175304-1622f8729964/go.mod h1:5O5i/CheboeAm/bnttfTi3dcpL5KBRjYjFQQVYaaP28= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210629175304-1622f8729964 h1:E9ezZ5Ivdks52UYC4FhRDpk1f4gHmIq0kkf+GH6CKB0= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210629175304-1622f8729964/go.mod h1:tWKWmokpVN3JsAvixr41tCj0vywHYtX0oXmVCvCYPkw= -github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca h1:NtRAdQTnE4B+UESOUaCSX3dw1uc+PpI1h2X7hUmE/5A= +github.com/openshift/kubernetes v1.21.3-0.20211215000257-b4b48133acd2 h1:VAJQyAj4GboUsy83EOLPg40uqBQP8PiBF7P1ErNzaEQ= +github.com/openshift/kubernetes 
v1.21.3-0.20211215000257-b4b48133acd2/go.mod h1:jWBCO2lJwEbjI7batKjXwhUdBv4l5v/BUIqzAVK+qIg= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20211215000257-b4b48133acd2 h1:EoPX4oLO5QzEodwne+qLRT29vbkv+F6YBOFVH73z13s= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20211215000257-b4b48133acd2/go.mod h1:2HmFHiW+MLivosPLJA6tpvBmyXqvY3suB8zaHBQJIQA= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20211215000257-b4b48133acd2 h1:7FBqAlzAosJCw8kjp9RGfMfIlFcDRwm0rR2qDSS4bFc= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20211215000257-b4b48133acd2/go.mod h1:KmkIdtuiX/LVhoYImjGLzrL+6FLUzhYnY12/C7L6t58= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211215000257-b4b48133acd2 h1:aXAgKqPTDq3M93FaofwHhZrvfcdwuKuPF/KiZEKOSjE= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211215000257-b4b48133acd2/go.mod h1:g9mhiOyEb88W9za5MKFdrMbQXPHAyu78tMsNer+6OC4= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211215000257-b4b48133acd2 h1:LXOZIjSTF3272rh7Mc1tYa1+AASWVMI5gKhKp1LeQ+A= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211215000257-b4b48133acd2/go.mod h1:N3dn4Ckq/fIUqkpqPQrSFkblMjr66Pq6J/EPh4SHdt0= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20211215000257-b4b48133acd2 h1:vF0/xvYYAa5exlin1lgA3UXP2zq4iLRRp1+hSgXMrKE= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20211215000257-b4b48133acd2/go.mod h1:ryvliHg05JP+pT/Ghok/F1A/Z5DjgjGne0xF6nZp7F0= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211215000257-b4b48133acd2 h1:3r0gDqwj34UAgJrSZXhnw4RqNLbAB9/XEE3Z+sC0Its= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211215000257-b4b48133acd2/go.mod h1:n4FAQ/sulLxnhSKpwirRfkc8wAmnickWzSK9gHtB3Go= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider 
v0.0.0-20211215000257-b4b48133acd2 h1:qCY0c5I1EGDdWa1a1kPxmaGY/dugdp8JdZwHDz9aYyg= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20211215000257-b4b48133acd2/go.mod h1:af41Ts8LJIiFBVLr4GVOqzR4sb0mPSiW4HRo/retTf4= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20211215000257-b4b48133acd2 h1:kUSxuO6vrG+K8TzGbsgiqoBG13d9gmokyh8vlXE4/t4= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20211215000257-b4b48133acd2/go.mod h1:76Eh3Ut9i+0FEa5LvhOJzFdADqdjf7TL3zDMIgVtQNw= +github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20211215000257-b4b48133acd2/go.mod h1:ihrc7sT+X5x2XbxaF6TxkDL2OqItFJfYJOxDfoT1Xcg= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211215000257-b4b48133acd2 h1:AO8oTmk3qwqNi4NLMHwnHsTigh0HxKZFeRnzTeUcAWA= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211215000257-b4b48133acd2/go.mod h1:HAo1VBPkTGuSkGZfszZKMXbrmQ5/z33BL+sisFw72JU= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211215000257-b4b48133acd2 h1:ilS+5C/Gu7bW8i/HMqb+EVtXh0yj6myIav+GTkRY/Kc= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211215000257-b4b48133acd2/go.mod h1:b5HYRMbavdqjLpMyw0qUS/vl2MH7NHrwvbiZdlrLOm4= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20211215000257-b4b48133acd2/go.mod h1:ZCFYdnPlCODfcLkq/81/Cu+9rPbjAGWQYfKkXucKMk8= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20211215000257-b4b48133acd2 h1:y2dG25JYDlPk2ipduNxjZfkI+lMf0Z3FtS5fn28bkJ4= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20211215000257-b4b48133acd2/go.mod h1:AtNyV6bU8OshuUzl3fdp7IoK3Xmwyfa1CKXxIKKI1V4= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20211215000257-b4b48133acd2 h1:oNDAZNRFBYEV4G4sqqI2HfBUoZUlz1uSPAS9FafejMQ= 
+github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20211215000257-b4b48133acd2/go.mod h1:YrmV2ZC6wJLPAuZbv8nJ/8kH6QuOEcvOHved7gnVdpw= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20211215000257-b4b48133acd2 h1:exwqDW42QDh9dK1DxXg3Zj4gVQrXKDXbL1pSUc3gYEU= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20211215000257-b4b48133acd2/go.mod h1:5LN9+WmmYYWuCr1XcdQ6G8OzWYSyOQcsnSeUdR+9lP4= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20211215000257-b4b48133acd2/go.mod h1:ggGXJ252aDdAu1eWI8oeGBX14RKQUSm2I280vZClwqI= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20211215000257-b4b48133acd2 h1:6HrUOk5MdBPvVYiNApi6p15geMdaUs2CInIqqrA5IOc= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20211215000257-b4b48133acd2/go.mod h1:5FzFit7eCkodT9EtFUUT77GOAo492Ed6zAldu6CRhuM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20211215000257-b4b48133acd2 h1:3Xv6jHIAsr1ILQTnlG8psHyj4aqVoJzzEMLPohZZ8b4= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20211215000257-b4b48133acd2/go.mod h1:CGThwowQ9mEmXah029ASQFsadJBxvhQMaaK85qscsH4= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20211215000257-b4b48133acd2 h1:5A1uCz2nKgDPN/GA0CDa42stNc+LMrNu+Eph7st7pHU= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20211215000257-b4b48133acd2/go.mod h1:VRjQvHLFBkJOf6jgyqLw15eWPFsPP/Dv1vqO0DLDpX8= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20211215000257-b4b48133acd2 h1:mZy1x+u43NwOoFK7Ckv/EgGpIHoqX9AClK+bXYhrWlo= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20211215000257-b4b48133acd2/go.mod h1:E9QaSsapdp1CNQafce5efnCyi7PloekOS5zcU/PP+ck= +github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20211215000257-b4b48133acd2 h1:ULWxr/n60MXb83LLwG0N9OUVN1MlinMjw0BLgdufZiQ= 
+github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20211215000257-b4b48133acd2/go.mod h1:T2J6+IF9/nAUEpwtiLMm05S7AvZ7Up7shQ5lEeXPgrg= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20211215000257-b4b48133acd2 h1:s2Bx2xm8VKhiCovpXDdXfQMiPx9l1ba3NcZgn0+K/fw= +github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20211215000257-b4b48133acd2/go.mod h1:bvjaieeRmT+PLxlDzj+SHGRXAqumeN2nHpCtwHTiXNw= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20211215000257-b4b48133acd2 h1:Z9kc+RRReK/TDqKpIgL7ZAwb5GZ73ddph2bXepRa6pQ= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20211215000257-b4b48133acd2/go.mod h1:EycBBK7+p/RjTpXnAYKqgD8to0E702DIhluqnkmoFR8= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20211215000257-b4b48133acd2 h1:KsyrKGo7c1wPehtPV0mq1JnbNsVimtTsfT/ut5V5GeA= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20211215000257-b4b48133acd2/go.mod h1:4euVe+uy0mwGtLLdKKrmZrQQM6d84JDJzoR2VFUtU9E= github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca/go.mod h1:87ZYjEncF0YNUKNzncb8Fiw8yFNevpIWZW83C/etzpw= +github.com/openshift/library-go v0.0.0-20211109160828-8c48fafbad15 h1:Cudip5cLnbpbumml7d+23XHtbq8K4pUY77L6yd3VSaw= +github.com/openshift/library-go v0.0.0-20211109160828-8c48fafbad15/go.mod h1:C5DDOSPucn3EVA0T05fODKtAweTObMBrTYm/G3uUBI8= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible h1:6XSBotNi58b4MwVV4F9o/jd4BaQd+uJyz+s5TR0/ot8= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible/go.mod h1:azqkkH4Vpp9A579CC26hicol/wViXag9rOwElif6v9E= -github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e h1:jaVXoepwhg7wXGjfOZRtbWg45IIp3BVwWVKTZgsNcrE= -github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -759,8 +766,9 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= @@ -1049,8 +1057,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1174,8 +1183,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1243,8 +1254,9 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod 
h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= @@ -1255,10 +1267,12 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= +sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc= +sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= @@ -1266,8 +1280,8 @@ sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbg sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg= 
sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore new file mode 100644 index 000000000000..5c204d28b0e3 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +target diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml new file mode 100644 index 000000000000..094aa5ce070c --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.travis.yml @@ -0,0 +1,37 @@ +language: go + +sudo: false + +branches: + except: + - release + +branches: + only: + - master + - travis + +go: + - "1.11.x" + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; + - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; + - go 
get github.com/mattn/goveralls + +before_script: + - make deps + +script: + - make qa + +after_failure: + - cat ./target/test/report.xml + +after_success: + - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE new file mode 100644 index 000000000000..59cab8a939be --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Will Fitzgerald. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md new file mode 100644 index 000000000000..97e83071e41a --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -0,0 +1,93 @@ +# bitset + +*Go language library to map between non-negative integers and boolean values* + +[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) +[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) + + +## Description + +Package bitset implements bitsets, a mapping between non-negative integers and boolean values. +It should be more efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing individual integers. + +But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. 
On creation, a hint can be given for the number of bits that will be used. + +Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. + +### Example use: + +```go +package main + +import ( + "fmt" + "math/rand" + + "github.com/bits-and-blooms/bitset" +) + +func main() { + fmt.Printf("Hello from BitSet!\n") + var b bitset.BitSet + // play some Go Fish + for i := 0; i < 100; i++ { + card1 := uint(rand.Intn(52)) + card2 := uint(rand.Intn(52)) + b.Set(card1) + if b.Test(card2) { + fmt.Println("Go Fish!") + } + b.Clear(card1) + } + + // Chaining + b.Set(10).Set(11) + + for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { + fmt.Println("The following bit is set:", i) + } + if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { + fmt.Println("Intersection works.") + } else { + fmt.Println("Intersection doesn't work???") + } +} +``` + +As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. + +Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc + +## Memory Usage + +The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). + +## Implementation Note + +Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. + +It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. 
+ +## Installation + +```bash +go get github.com/bits-and-blooms/bitset +``` + +## Contributing + +If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") + +## Running all tests + +Before committing the code, please check if it passes tests, has adequate coverage, etc. +```bash +go test +go test -cover +``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml new file mode 100644 index 000000000000..f9b295918404 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml @@ -0,0 +1,39 @@ +# Go +# Build your Go project. +# Add steps that test, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/languages/go + +trigger: +- master + +pool: + vmImage: 'Ubuntu-16.04' + +variables: + GOBIN: '$(GOPATH)/bin' # Go binaries path + GOROOT: '/usr/local/go1.11' # Go installation path + GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path + modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code + +steps: +- script: | + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + shopt -s extglob + shopt -s dotglob + mv !(gopath) '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + displayName: 'Set up the Go workspace' + +- script: | + go version + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + go build -v . 
+ workingDirectory: '$(modulePath)' + displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go new file mode 100644 index 000000000000..d688806a54b8 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -0,0 +1,952 @@ +/* +Package bitset implements bitsets, a mapping +between non-negative integers and boolean values. It should be more +efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing +individual integers. + +But it also provides set intersection, union, difference, +complement, and symmetric operations, as well as tests to +check whether any, all, or no bits are set, and querying a +bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the +memory allocation is approximately Max bits, where Max is +the largest set bit. BitSets are never shrunk. On creation, +a hint can be given for the number of bits that will be used. + +Many of the methods, including Set,Clear, and Flip, return +a BitSet pointer, which allows for chaining. + +Example use: + + import "bitset" + var b BitSet + b.Set(10).Set(11) + if b.Test(1000) { + b.Clear(1000) + } + if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { + fmt.Println("Intersection works.") + } + +As an alternative to BitSets, one should check out the 'big' package, +which provides a (less set-theoretical) view of bitsets. 
+ +*/ +package bitset + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// the wordSize of a bit set +const wordSize = uint(64) + +// log2WordSize is lg(wordSize) +const log2WordSize = uint(6) + +// allBits has every bit set +const allBits uint64 = 0xffffffffffffffff + +// default binary BigEndian +var binaryOrder binary.ByteOrder = binary.BigEndian + +// default json encoding base64.URLEncoding +var base64Encoding = base64.URLEncoding + +// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) +func Base64StdEncoding() { base64Encoding = base64.StdEncoding } + +// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) +func LittleEndian() { binaryOrder = binary.LittleEndian } + +// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. +type BitSet struct { + length uint + set []uint64 +} + +// Error is used to distinguish errors (panics) generated in this package. 
+type Error string + +// safeSet will fixup b.set to be non-nil and return the field value +func (b *BitSet) safeSet() []uint64 { + if b.set == nil { + b.set = make([]uint64, wordsNeeded(0)) + } + return b.set +} + +// From is a constructor used to create a BitSet from an array of integers +func From(buf []uint64) *BitSet { + return &BitSet{uint(len(buf)) * 64, buf} +} + +// Bytes returns the bitset as array of integers +func (b *BitSet) Bytes() []uint64 { + return b.set +} + +// wordsNeeded calculates the number of words needed for i bits +func wordsNeeded(i uint) int { + if i > (Cap() - wordSize + 1) { + return int(Cap() >> log2WordSize) + } + return int((i + (wordSize - 1)) >> log2WordSize) +} + +// New creates a new BitSet with a hint that length bits will be required +func New(length uint) (bset *BitSet) { + defer func() { + if r := recover(); r != nil { + bset = &BitSet{ + 0, + make([]uint64, 0), + } + } + }() + + bset = &BitSet{ + length, + make([]uint64, wordsNeeded(length)), + } + + return bset +} + +// Cap returns the total possible capacity, or number of bits +func Cap() uint { + return ^uint(0) +} + +// Len returns the number of bits in the BitSet. +// Note the difference to method Count, see example. +func (b *BitSet) Len() uint { + return b.length +} + +// extendSetMaybe adds additional words to incorporate new bits if needed +func (b *BitSet) extendSetMaybe(i uint) { + if i >= b.length { // if we need more bits, make 'em + if i >= Cap() { + panic("You are exceeding the capacity") + } + nsize := wordsNeeded(i + 1) + if b.set == nil { + b.set = make([]uint64, nsize) + } else if cap(b.set) >= nsize { + b.set = b.set[:nsize] // fast resize + } else if len(b.set) < nsize { + newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x + copy(newset, b.set) + b.set = newset + } + b.length = i + 1 + } +} + +// Test whether bit i is set. 
+func (b *BitSet) Test(i uint) bool { + if i >= b.length { + return false + } + return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 +} + +// Set bit i to 1, the capacity of the bitset is automatically +// increased accordingly. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) Set(i uint) *BitSet { + b.extendSetMaybe(i) + b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) + return b +} + +// Clear bit i to 0 +func (b *BitSet) Clear(i uint) *BitSet { + if i >= b.length { + return b + } + b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) + return b +} + +// SetTo sets bit i to value. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) SetTo(i uint, value bool) *BitSet { + if value { + return b.Set(i) + } + return b.Clear(i) +} + +// Flip bit at i. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. +func (b *BitSet) Flip(i uint) *BitSet { + if i >= b.length { + return b.Set(i) + } + b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) + return b +} + +// FlipRange bit in [start, end). +// If end>= Cap(), this function will panic. +// Warning: using a very large value for 'end' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. 
+func (b *BitSet) FlipRange(start, end uint) *BitSet { + if start >= end { + return b + } + + b.extendSetMaybe(end - 1) + var startWord uint = start >> log2WordSize + var endWord uint = end >> log2WordSize + b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) + for i := startWord; i < endWord; i++ { + b.set[i] = ^b.set[i] + } + b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) + return b +} + +// Shrink shrinks BitSet so that the provided value is the last possible +// set value. It clears all bits > the provided index and reduces the size +// and length of the set. +// +// Note that the parameter value is not the new length in bits: it is the +// maximal value that can be stored in the bitset after the function call. +// The new length in bits is the parameter value + 1. Thus it is not possible +// to use this function to set the length to 0, the minimal value of the length +// after this function call is 1. +// +// A new slice is allocated to store the new bits, so you may see an increase in +// memory usage until the GC runs. Normally this should not be a problem, but if you +// have an extremely large BitSet its important to understand that the old BitSet will +// remain in memory until the GC frees it. +func (b *BitSet) Shrink(lastbitindex uint) *BitSet { + length := lastbitindex + 1 + idx := wordsNeeded(length) + if idx > len(b.set) { + return b + } + shrunk := make([]uint64, idx) + copy(shrunk, b.set[:idx]) + b.set = shrunk + b.length = length + b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) + return b +} + +// Compact shrinks BitSet to so that we preserve all set bits, while minimizing +// memory usage. Compact calls Shrink. 
+func (b *BitSet) Compact() *BitSet { + idx := len(b.set) - 1 + for ; idx >= 0 && b.set[idx] == 0; idx-- { + } + newlength := uint((idx + 1) << log2WordSize) + if newlength >= b.length { + return b // nothing to do + } + if newlength > 0 { + return b.Shrink(newlength - 1) + } + // We preserve one word + return b.Shrink(63) +} + +// InsertAt takes an index which indicates where a bit should be +// inserted. Then it shifts all the bits in the set to the left by 1, starting +// from the given index position, and sets the index position to 0. +// +// Depending on the size of your BitSet, and where you are inserting the new entry, +// this method could be extremely slow and in some cases might cause the entire BitSet +// to be recopied. +func (b *BitSet) InsertAt(idx uint) *BitSet { + insertAtElement := (idx >> log2WordSize) + + // if length of set is a multiple of wordSize we need to allocate more space first + if b.isLenExactMultiple() { + b.set = append(b.set, uint64(0)) + } + + var i uint + for i = uint(len(b.set) - 1); i > insertAtElement; i-- { + // all elements above the position where we want to insert can simply by shifted + b.set[i] <<= 1 + + // we take the most significant bit of the previous element and set it as + // the least significant bit of the current element + b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 + } + + // generate a mask to extract the data that we need to shift left + // within the element where we insert a bit + dataMask := ^(uint64(1)< 0x40000 { + buffer.WriteString("...") + break + } + buffer.WriteString(strconv.FormatInt(int64(i), 10)) + i, e = b.NextSet(i + 1) + if e { + buffer.WriteString(",") + } + } + buffer.WriteString("}") + return buffer.String() +} + +// DeleteAt deletes the bit at the given index position from +// within the bitset +// All the bits residing on the left of the deleted bit get +// shifted right by 1 +// The running time of this operation may potentially be +// relatively slow, O(length) +func (b *BitSet) 
DeleteAt(i uint) *BitSet { + // the index of the slice element where we'll delete a bit + deleteAtElement := i >> log2WordSize + + // generate a mask for the data that needs to be shifted right + // within that slice element that gets modified + dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) + + // extract the data that we'll shift right from the slice element + data := b.set[deleteAtElement] & dataMask + + // set the masked area to 0 while leaving the rest as it is + b.set[deleteAtElement] &= ^dataMask + + // shift the previously extracted data to the right and then + // set it in the previously masked area + b.set[deleteAtElement] |= (data >> 1) & dataMask + + // loop over all the consecutive slice elements to copy each + // lowest bit into the highest position of the previous element, + // then shift the entire content to the right by 1 + for i := int(deleteAtElement) + 1; i < len(b.set); i++ { + b.set[i-1] |= (b.set[i] & 1) << 63 + b.set[i] >>= 1 + } + + b.length = b.length - 1 + + return b +} + +// NextSet returns the next bit set from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no set bit found) +// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} +// +// Users concerned with performance may want to use NextSetMany to +// retrieve several values at once. +func (b *BitSet) NextSet(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + if w != 0 { + return i + trailingZeroes64(w), true + } + x = x + 1 + for x < len(b.set) { + if b.set[x] != 0 { + return uint(x)*wordSize + trailingZeroes64(b.set[x]), true + } + x = x + 1 + + } + return 0, false +} + +// NextSetMany returns many next bit sets from the specified index, +// including possibly the current index and up to cap(buffer). 
+// If the returned slice has len zero, then no more set bits were found +// +// buffer := make([]uint, 256) // this should be reused +// j := uint(0) +// j, buffer = bitmap.NextSetMany(j, buffer) +// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { +// for k := range buffer { +// do something with buffer[k] +// } +// j += 1 +// } +// +// +// It is possible to retrieve all set bits as follow: +// +// indices := make([]uint, bitmap.Count()) +// bitmap.NextSetMany(0, indices) +// +// However if bitmap.Count() is large, it might be preferable to +// use several calls to NextSetMany, for performance reasons. +func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { + myanswer := buffer + capacity := cap(buffer) + x := int(i >> log2WordSize) + if x >= len(b.set) || capacity == 0 { + return 0, myanswer[:0] + } + skip := i & (wordSize - 1) + word := b.set[x] >> skip + myanswer = myanswer[:capacity] + size := int(0) + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + i + size++ + if size == capacity { + goto End + } + word = word ^ t + } + x++ + for idx, word := range b.set[x:] { + for word != 0 { + r := trailingZeroes64(word) + t := word & ((^word) + 1) + myanswer[size] = r + (uint(x+idx) << 6) + size++ + if size == capacity { + goto End + } + word = word ^ t + } + } +End: + if size > 0 { + return myanswer[size-1], myanswer[:size] + } + return 0, myanswer[:0] +} + +// NextClear returns the next clear bit from the specified index, +// including possibly the current index +// along with an error code (true = valid, false = no bit found i.e. 
all bits are set) +func (b *BitSet) NextClear(i uint) (uint, bool) { + x := int(i >> log2WordSize) + if x >= len(b.set) { + return 0, false + } + w := b.set[x] + w = w >> (i & (wordSize - 1)) + wA := allBits >> (i & (wordSize - 1)) + index := i + trailingZeroes64(^w) + if w != wA && index < b.length { + return index, true + } + x++ + for x < len(b.set) { + index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) + if b.set[x] != allBits && index < b.length { + return index, true + } + x++ + } + return 0, false +} + +// ClearAll clears the entire BitSet +func (b *BitSet) ClearAll() *BitSet { + if b != nil && b.set != nil { + for i := range b.set { + b.set[i] = 0 + } + } + return b +} + +// wordCount returns the number of words used in a bit set +func (b *BitSet) wordCount() int { + return len(b.set) +} + +// Clone this BitSet +func (b *BitSet) Clone() *BitSet { + c := New(b.length) + if b.set != nil { // Clone should not modify current object + copy(c.set, b.set) + } + return c +} + +// Copy into a destination BitSet +// Returning the size of the destination BitSet +// like array copy +func (b *BitSet) Copy(c *BitSet) (count uint) { + if c == nil { + return + } + if b.set != nil { // Copy should not modify current object + copy(c.set, b.set) + } + count = c.length + if b.length < c.length { + count = b.length + } + return +} + +// Count (number of set bits). +// Also known as "popcount" or "population count". +func (b *BitSet) Count() uint { + if b != nil && b.set != nil { + return uint(popcntSlice(b.set)) + } + return 0 +} + +// Equal tests the equivalence of two BitSets. 
+// False if they are of different sizes, otherwise true +// only if all the same bits are set +func (b *BitSet) Equal(c *BitSet) bool { + if c == nil || b == nil { + return c == b + } + if b.length != c.length { + return false + } + if b.length == 0 { // if they have both length == 0, then could have nil set + return true + } + // testing for equality shoud not transform the bitset (no call to safeSet) + + for p, v := range b.set { + if c.set[p] != v { + return false + } + } + return true +} + +func panicIfNull(b *BitSet) { + if b == nil { + panic(Error("BitSet must not be null")) + } +} + +// Difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + result = b.Clone() // clone b (in case b is bigger than compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + result.set[i] = b.set[i] &^ compare.set[i] + } + return +} + +// DifferenceCardinality computes the cardinality of the differnce +func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + cnt := uint64(0) + cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) + cnt += popcntSlice(b.set[l:]) + return uint(cnt) +} + +// InPlaceDifference computes the difference of base set and other set +// This is the BitSet equivalent of &^ (and not) +func (b *BitSet) InPlaceDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &^= compare.set[i] + } +} + +// Convenience function: return two bitsets ordered by +// increasing length. 
Note: neither can be nil +func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { + if a.length <= b.length { + ap, bp = a, b + } else { + ap, bp = b, a + } + return +} + +// Intersection of base set and other set +// This is the BitSet equivalent of & (and) +func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = New(b.length) + for i, word := range b.set { + result.set[i] = word & compare.set[i] + } + return +} + +// IntersectionCardinality computes the cardinality of the union +func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntAndSlice(b.set, compare.set) + return uint(cnt) +} + +// InPlaceIntersection destructively computes the intersection of +// base set and the compare set. +// This is the BitSet equivalent of & (and) +func (b *BitSet) InPlaceIntersection(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + for i := 0; i < l; i++ { + b.set[i] &= compare.set[i] + } + for i := l; i < len(b.set); i++ { + b.set[i] = 0 + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } +} + +// Union of base set and other set +// This is the BitSet equivalent of | (or) +func (b *BitSet) Union(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word | compare.set[i] + } + return +} + +// UnionCardinality computes the cardinality of the uniton of the base set +// and the compare set. 
+func (b *BitSet) UnionCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntOrSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceUnion creates the destructive union of base set and compare set. +// This is the BitSet equivalent of | (or). +func (b *BitSet) InPlaceUnion(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 { + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] |= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + // compare is bigger, so clone it + result = compare.Clone() + for i, word := range b.set { + result.set[i] = word ^ compare.set[i] + } + return +} + +// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference +func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { + panicIfNull(b) + panicIfNull(compare) + b, compare = sortByLength(b, compare) + cnt := popcntXorSlice(b.set, compare.set) + if len(compare.set) > len(b.set) { + cnt += popcntSlice(compare.set[len(b.set):]) + } + return uint(cnt) +} + +// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set +// This is the BitSet equivalent of ^ (xor) +func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { + panicIfNull(b) + panicIfNull(compare) + l := int(compare.wordCount()) + if l > int(b.wordCount()) { + l = int(b.wordCount()) + } + if compare.length > 0 
{ + b.extendSetMaybe(compare.length - 1) + } + for i := 0; i < l; i++ { + b.set[i] ^= compare.set[i] + } + if len(compare.set) > l { + for i := l; i < len(compare.set); i++ { + b.set[i] = compare.set[i] + } + } +} + +// Is the length an exact multiple of word sizes? +func (b *BitSet) isLenExactMultiple() bool { + return b.length%wordSize == 0 +} + +// Clean last word by setting unused bits to 0 +func (b *BitSet) cleanLastWord() { + if !b.isLenExactMultiple() { + b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) + } +} + +// Complement computes the (local) complement of a biset (up to length bits) +func (b *BitSet) Complement() (result *BitSet) { + panicIfNull(b) + result = New(b.length) + for i, word := range b.set { + result.set[i] = ^word + } + result.cleanLastWord() + return +} + +// All returns true if all bits are set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) All() bool { + panicIfNull(b) + return b.Count() == b.length +} + +// None returns true if no bit is set, false otherwise. Returns true for +// empty sets. +func (b *BitSet) None() bool { + panicIfNull(b) + if b != nil && b.set != nil { + for _, word := range b.set { + if word > 0 { + return false + } + } + return true + } + return true +} + +// Any returns true if any bit is set, false otherwise +func (b *BitSet) Any() bool { + panicIfNull(b) + return !b.None() +} + +// IsSuperSet returns true if this is a superset of the other set +func (b *BitSet) IsSuperSet(other *BitSet) bool { + for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { + if !b.Test(i) { + return false + } + } + return true +} + +// IsStrictSuperSet returns true if this is a strict superset of the other set +func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { + return b.Count() > other.Count() && b.IsSuperSet(other) +} + +// DumpAsBits dumps a bit set as a string of bits +func (b *BitSet) DumpAsBits() string { + if b.set == nil { + return "." 
+ } + buffer := bytes.NewBufferString("") + i := len(b.set) - 1 + for ; i >= 0; i-- { + fmt.Fprintf(buffer, "%064b.", b.set[i]) + } + return buffer.String() +} + +// BinaryStorageSize returns the binary storage requirements +func (b *BitSet) BinaryStorageSize() int { + return binary.Size(uint64(0)) + binary.Size(b.set) +} + +// WriteTo writes a BitSet to a stream +func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { + length := uint64(b.length) + + // Write length + err := binary.Write(stream, binaryOrder, length) + if err != nil { + return 0, err + } + + // Write set + err = binary.Write(stream, binaryOrder, b.set) + return int64(b.BinaryStorageSize()), err +} + +// ReadFrom reads a BitSet from a stream written using WriteTo +func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { + var length uint64 + + // Read length first + err := binary.Read(stream, binaryOrder, &length) + if err != nil { + return 0, err + } + newset := New(uint(length)) + + if uint64(newset.length) != length { + return 0, errors.New("unmarshalling error: type mismatch") + } + + // Read remaining bytes as set + err = binary.Read(stream, binaryOrder, newset.set) + if err != nil { + return 0, err + } + + *b = *newset + return int64(b.BinaryStorageSize()), nil +} + +// MarshalBinary encodes a BitSet into a binary form and returns the result. +func (b *BitSet) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + _, err := b.WriteTo(writer) + if err != nil { + return []byte{}, err + } + + err = writer.Flush() + + return buf.Bytes(), err +} + +// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
+func (b *BitSet) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + reader := bufio.NewReader(buf) + + _, err := b.ReadFrom(reader) + + return err +} + +// MarshalJSON marshals a BitSet as a JSON structure +func (b *BitSet) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) + _, err := b.WriteTo(buffer) + if err != nil { + return nil, err + } + + // URLEncode all bytes + return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) +} + +// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON +func (b *BitSet) UnmarshalJSON(data []byte) error { + // Unmarshal as string + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + // URLDecode string + buf, err := base64Encoding.DecodeString(s) + if err != nil { + return err + } + + _, err = b.ReadFrom(bytes.NewReader(buf)) + return err +} diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod new file mode 100644 index 000000000000..c43e4522b7f9 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/go.mod @@ -0,0 +1,3 @@ +module github.com/bits-and-blooms/bitset + +go 1.14 diff --git a/vendor/github.com/bits-and-blooms/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go new file mode 100644 index 000000000000..76577a838284 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt.go @@ -0,0 +1,53 @@ +package bitset + +// bit population count, take from +// https://code.google.com/p/go/issues/detail?id=4988#c11 +// credit: https://code.google.com/u/arnehormann/ +func popcount(x uint64) (n uint64) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 
0x0101010101010101 + return x >> 56 +} + +func popcntSliceGo(s []uint64) uint64 { + cnt := uint64(0) + for _, x := range s { + cnt += popcount(x) + } + return cnt +} + +func popcntMaskSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] &^ m[i]) + } + return cnt +} + +func popcntAndSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] & m[i]) + } + return cnt +} + +func popcntOrSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] | m[i]) + } + return cnt +} + +func popcntXorSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] ^ m[i]) + } + return cnt +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go new file mode 100644 index 000000000000..fc8ff4f367c2 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go @@ -0,0 +1,45 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func popcntSlice(s []uint64) uint64 { + var cnt int + for _, x := range s { + cnt += bits.OnesCount64(x) + } + return uint64(cnt) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] &^ m[i]) + } + return uint64(cnt) +} + +func popcntAndSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] & m[i]) + } + return uint64(cnt) +} + +func popcntOrSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] | m[i]) + } + return uint64(cnt) +} + +func popcntXorSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] ^ m[i]) + } + return uint64(cnt) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go new file mode 100644 index 000000000000..4cf64f24ad03 --- /dev/null +++ 
b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go @@ -0,0 +1,68 @@ +// +build !go1.9 +// +build amd64,!appengine + +package bitset + +// *** the following functions are defined in popcnt_amd64.s + +//go:noescape + +func hasAsm() bool + +// useAsm is a flag used to select the GO or ASM implementation of the popcnt function +var useAsm = hasAsm() + +//go:noescape + +func popcntSliceAsm(s []uint64) uint64 + +//go:noescape + +func popcntMaskSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntAndSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntOrSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntXorSliceAsm(s, m []uint64) uint64 + +func popcntSlice(s []uint64) uint64 { + if useAsm { + return popcntSliceAsm(s) + } + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + if useAsm { + return popcntMaskSliceAsm(s, m) + } + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + if useAsm { + return popcntAndSliceAsm(s, m) + } + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + if useAsm { + return popcntOrSliceAsm(s, m) + } + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + if useAsm { + return popcntXorSliceAsm(s, m) + } + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s new file mode 100644 index 000000000000..666c0dcc17f5 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s @@ -0,0 +1,104 @@ +// +build !go1.9 +// +build amd64,!appengine + +TEXT ·hasAsm(SB),4,$0-1 +MOVQ $1, AX +CPUID +SHRQ $23, CX +ANDQ $1, CX +MOVB CX, ret+0(FP) +RET + +#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 + +TEXT ·popcntSliceAsm(SB),4,$0-32 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntSliceEnd +popcntSliceLoop: +BYTE $0xf3; BYTE $0x48; BYTE 
$0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX +ADDQ DX, AX +ADDQ $8, SI +LOOP popcntSliceLoop +popcntSliceEnd: +MOVQ AX, ret+24(FP) +RET + +TEXT ·popcntMaskSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntMaskSliceEnd +MOVQ m+24(FP), DI +popcntMaskSliceLoop: +MOVQ (DI), DX +NOTQ DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntMaskSliceLoop +popcntMaskSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntAndSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntAndSliceEnd +MOVQ m+24(FP), DI +popcntAndSliceLoop: +MOVQ (DI), DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntAndSliceLoop +popcntAndSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntOrSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntOrSliceEnd +MOVQ m+24(FP), DI +popcntOrSliceLoop: +MOVQ (DI), DX +ORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntOrSliceLoop +popcntOrSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntXorSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntXorSliceEnd +MOVQ m+24(FP), DI +popcntXorSliceLoop: +MOVQ (DI), DX +XORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntXorSliceLoop +popcntXorSliceEnd: +MOVQ AX, ret+48(FP) +RET diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go new file mode 100644 index 000000000000..21e0ff7b4fc5 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go @@ -0,0 +1,24 @@ +// +build !go1.9 +// +build !amd64 appengine + +package bitset + +func popcntSlice(s []uint64) uint64 { + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + return 
popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go new file mode 100644 index 000000000000..c52b61be9fc2 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package bitset + +var deBruijn = [...]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +func trailingZeroes64(v uint64) uint { + return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go new file mode 100644 index 000000000000..36a988e714d1 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go @@ -0,0 +1,9 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func trailingZeroes64(v uint64) uint { + return uint(bits.TrailingZeros64(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff6420577..066b4323b499 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. 
var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff13c2..85f9f57365fd 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. 
func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c6273..d3c33259d28d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. 
package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a41d9..b2b55dd851f5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d49155d..8368a3f70d38 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. 
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go index 51ce36fb60da..e4ffca838a17 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go @@ -11,7 +11,6 @@ import ( "time" "github.com/google/go-cmp/cmp" - "golang.org/x/xerrors" ) func equateAlways(_, _ interface{}) bool { return true } @@ -147,10 +146,3 @@ func areConcreteErrors(x, y interface{}) bool { _, ok2 := y.(error) return ok1 && ok2 } - -func compareErrors(x, y interface{}) bool { - xe := x.(error) - ye := y.(error) - // TODO(≥go1.13): Use standard definition of errors.Is. - return xerrors.Is(xe, ye) || xerrors.Is(ye, xe) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go new file mode 100644 index 000000000000..26fe25d6afbc --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go @@ -0,0 +1,15 @@ +// Copyright 2021, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package cmpopts + +import "errors" + +func compareErrors(x, y interface{}) bool { + xe := x.(error) + ye := y.(error) + return errors.Is(xe, ye) || errors.Is(ye, xe) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go new file mode 100644 index 000000000000..6eeb8d6e6543 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go @@ -0,0 +1,18 @@ +// Copyright 2021, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +// TODO(≥go1.13): For support on 0: return false // Some ignore option was used case v.NumTransformed > 0: @@ -45,7 +43,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } - switch t := v.Type; t.Kind() { + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. @@ -57,6 +64,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. 
if v.NumDiff > v.NumSame { @@ -68,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { // Use specialized string diffing for longer slices or strings. const minLength = 64 - return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength + return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. @@ -77,6 +89,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } // Auto-detect the type of the data. var isLinedText, isText, isBinary bool diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go index cf2f948fe3fd..3114fc5a3cdd 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go @@ -131,7 +131,25 @@ func (s *FreezerGroup) GetState(path string) (configs.FreezerState, error) { case "THAWED": return configs.Thawed, nil case "FROZEN": - return configs.Frozen, nil + // Find out whether the cgroup is frozen directly, + // or indirectly via an ancestor. + self, err := fscommon.ReadFile(path, "freezer.self_freezing") + if err != nil { + // If the kernel is too old, then we just treat + // it as being frozen. 
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.ENODEV) { + err = nil + } + return configs.Frozen, err + } + switch self { + case "0\n": + return configs.Thawed, nil + case "1\n": + return configs.Frozen, nil + default: + return configs.Undefined, fmt.Errorf(`unknown "freezer.self_freezing" state: %q`, self) + } case "FREEZING": // Make sure we get a stable freezer state, so retry if the cgroup // is still undergoing freezing. This should be a temporary delay. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 24ef91ea2e74..2cf4a6097bec 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -158,14 +158,27 @@ func findDeviceGroup(ruleType devices.Type, ruleMajor int64) (string, error) { return "", nil } +// DeviceAllow is the dbus type "a(ss)" which means we need a struct +// to represent it in Go. +type deviceAllowEntry struct { + Path string + Perms string +} + +func allowAllDevices() []systemdDbus.Property { + // Setting mode to auto and removing all DeviceAllow rules + // results in allowing access to all devices. + return []systemdDbus.Property{ + newProp("DevicePolicy", "auto"), + newProp("DeviceAllow", []deviceAllowEntry{}), + } +} + // generateDeviceProperties takes the configured device rules and generates a // corresponding set of systemd properties to configure the devices correctly. -func generateDeviceProperties(rules []*devices.Rule) ([]systemdDbus.Property, error) { - // DeviceAllow is the type "a(ss)" which means we need a temporary struct - // to represent it in Go. 
- type deviceAllowEntry struct { - Path string - Perms string +func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, error) { + if r.SkipDevices { + return nil, nil } properties := []systemdDbus.Property{ @@ -177,7 +190,7 @@ func generateDeviceProperties(rules []*devices.Rule) ([]systemdDbus.Property, er // Figure out the set of rules. configEmu := &cgroupdevices.Emulator{} - for _, rule := range rules { + for _, rule := range r.Devices { if err := configEmu.Apply(*rule); err != nil { return nil, errors.Wrap(err, "apply rule for systemd") } @@ -189,12 +202,7 @@ func generateDeviceProperties(rules []*devices.Rule) ([]systemdDbus.Property, er if configEmu.IsBlacklist() { // However, if we're dealing with an allow-all rule then we can do it. if configEmu.IsAllowAll() { - return []systemdDbus.Property{ - // Run in white-list mode by setting to "auto" and removing all - // DeviceAllow rules. - newProp("DevicePolicy", "auto"), - newProp("DeviceAllow", []deviceAllowEntry{}), - }, nil + return allowAllDevices(), nil } logrus.Warn("systemd doesn't support blacklist device rules -- applying temporary deny-all rule") return properties, nil @@ -303,6 +311,14 @@ func getUnitName(c *configs.Cgroup) string { return c.Name } +// This code should be in sync with getUnitName. +func getUnitType(unitName string) string { + if strings.HasSuffix(unitName, ".slice") { + return "Slice" + } + return "Scope" +} + // isDbusError returns true if the error is a specific dbus error. func isDbusError(err error, name string) bool { if err != nil { @@ -355,6 +371,9 @@ func stopUnit(cm *dbusConnManager, unitName string) error { return err }) if err == nil { + timeout := time.NewTimer(30 * time.Second) + defer timeout.Stop() + select { case s := <-statusChan: close(statusChan) @@ -362,8 +381,8 @@ func stopUnit(cm *dbusConnManager, unitName string) error { if s != "done" { logrus.Warnf("error removing unit `%s`: got `%s`. 
Continuing...", unitName, s) } - case <-time.After(time.Second): - logrus.Warnf("Timed out while waiting for StopUnit(%s) completion signal from dbus. Continuing...", unitName) + case <-timeout.C: + return errors.New("Timed out while waiting for systemd to remove " + unitName) } } return nil @@ -378,6 +397,15 @@ func resetFailedUnit(cm *dbusConnManager, name string) { } } +func getUnitTypeProperty(cm *dbusConnManager, unitName string, unitType string, propertyName string) (*systemdDbus.Property, error) { + var prop *systemdDbus.Property + err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) (Err error) { + prop, Err = c.GetUnitTypePropertyContext(context.TODO(), unitName, unitType, propertyName) + return Err + }) + return prop, err +} + func setUnitProperties(cm *dbusConnManager, name string, properties ...systemdDbus.Property) error { return cm.retryOnDisconnect(func(c *systemdDbus.Conn) error { return c.SetUnitPropertiesContext(context.TODO(), name, true, properties...) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go index 41de6e8b70f0..8097eba13fce 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go @@ -6,14 +6,17 @@ import ( "errors" "os" "path/filepath" + "reflect" "strings" "sync" systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + "github.com/sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/sirupsen/logrus" ) type legacyManager struct { @@ -61,7 +64,7 @@ var legacySubsystems = []subsystem{ func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) { var properties []systemdDbus.Property - deviceProperties, err := 
generateDeviceProperties(r.Devices) + deviceProperties, err := generateDeviceProperties(r) if err != nil { return nil, err } @@ -207,9 +210,10 @@ func (m *legacyManager) Destroy() error { stopErr := stopUnit(m.dbus, getUnitName(m.cgroups)) - // Both on success and on error, cleanup all the cgroups we are aware of. - // Some of them were created directly by Apply() and are not managed by systemd. - if err := cgroups.RemovePaths(m.paths); err != nil { + // Both on success and on error, cleanup all the cgroups + // we are aware of, as some of them were created directly + // by Apply() and are not managed by systemd. + if err := cgroups.RemovePaths(m.paths); err != nil && stopErr == nil { return err } @@ -277,18 +281,23 @@ func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) { } func (m *legacyManager) Freeze(state configs.FreezerState) error { + err := m.doFreeze(state) + if err == nil { + m.cgroups.Resources.Freezer = state + } + return err +} + +// doFreeze is the same as Freeze but without +// changing the m.cgroups.Resources.Frozen field. +func (m *legacyManager) doFreeze(state configs.FreezerState) error { path, ok := m.paths["freezer"] if !ok { return errSubsystemDoesNotExist } - prevState := m.cgroups.Resources.Freezer - m.cgroups.Resources.Freezer = state freezer := &fs.FreezerGroup{} - if err := freezer.Set(path, m.cgroups.Resources); err != nil { - m.cgroups.Resources.Freezer = prevState - return err - } - return nil + resources := &configs.Resources{Freezer: state} + return freezer.Set(path, resources) } func (m *legacyManager) GetPids() ([]int, error) { @@ -324,6 +333,71 @@ func (m *legacyManager) GetStats() (*cgroups.Stats, error) { return stats, nil } +// freezeBeforeSet answers whether there is a need to freeze the cgroup before +// applying its systemd unit properties, and thaw after, while avoiding +// unnecessary freezer state changes. 
+// +// The reason why we have to freeze is that systemd's application of device +// rules is done disruptively, resulting in spurious errors to common devices +// (unlike our fs driver, they will happily write deny-all rules to running +// containers). So we have to freeze the container to avoid the container get +// an occasional "permission denied" error. +func (m *legacyManager) freezeBeforeSet(unitName string, r *configs.Resources) (needsFreeze, needsThaw bool, err error) { + // Special case for SkipDevices, as used by Kubernetes to create pod + // cgroups with allow-all device policy). + if r.SkipDevices { + if r.SkipFreezeOnSet { + // Both needsFreeze and needsThaw are false. + return + } + + // No need to freeze if SkipDevices is set, and either + // (1) systemd unit does not (yet) exist, or + // (2) it has DevicePolicy=auto and empty DeviceAllow list. + // + // Interestingly, (1) and (2) are the same here because + // a non-existent unit returns default properties, + // and settings in (2) are the defaults. + // + // Do not return errors from getUnitTypeProperty, as they alone + // should not prevent Set from working. + + unitType := getUnitType(unitName) + + devPolicy, e := getUnitTypeProperty(m.dbus, unitName, unitType, "DevicePolicy") + if e == nil && devPolicy.Value == dbus.MakeVariant("auto") { + devAllow, e := getUnitTypeProperty(m.dbus, unitName, unitType, "DeviceAllow") + if e == nil { + if rv := reflect.ValueOf(devAllow.Value.Value()); rv.Kind() == reflect.Slice && rv.Len() == 0 { + needsFreeze = false + needsThaw = false + return + } + } + } + } + + needsFreeze = true + needsThaw = true + + // Check the current freezer state. + freezerState, err := m.GetFreezerState() + if err != nil { + return + } + if freezerState == configs.Frozen { + // Already frozen, and should stay frozen. + needsFreeze = false + needsThaw = false + } + + if r.Freezer == configs.Frozen { + // Will be frozen anyway -- no need to thaw. 
+ needsThaw = false + } + return +} + func (m *legacyManager) Set(r *configs.Resources) error { // If Paths are set, then we are just joining cgroups paths // and there is no need to set any values. @@ -338,37 +412,27 @@ func (m *legacyManager) Set(r *configs.Resources) error { return err } - // We have to freeze the container while systemd sets the cgroup settings. - // The reason for this is that systemd's application of DeviceAllow rules - // is done disruptively, resulting in spurrious errors to common devices - // (unlike our fs driver, they will happily write deny-all rules to running - // containers). So we freeze the container to avoid them hitting the cgroup - // error. But if the freezer cgroup isn't supported, we just warn about it. - targetFreezerState := configs.Undefined - if !m.cgroups.SkipDevices { - // Figure out the current freezer state, so we can revert to it after we - // temporarily freeze the container. - targetFreezerState, err = m.GetFreezerState() - if err != nil { - return err - } - if targetFreezerState == configs.Undefined { - targetFreezerState = configs.Thawed - } + unitName := getUnitName(m.cgroups) + needsFreeze, needsThaw, err := m.freezeBeforeSet(unitName, r) + if err != nil { + return err + } - if err := m.Freeze(configs.Frozen); err != nil { + if needsFreeze { + if err := m.doFreeze(configs.Frozen); err != nil { + // If freezer cgroup isn't supported, we just warn about it. logrus.Infof("freeze container before SetUnitProperties failed: %v", err) } } - - if err := setUnitProperties(m.dbus, getUnitName(m.cgroups), properties...); err != nil { - _ = m.Freeze(targetFreezerState) - return err + setErr := setUnitProperties(m.dbus, unitName, properties...) 
+ if needsThaw { + if err := m.doFreeze(configs.Thawed); err != nil { + logrus.Infof("thaw container after SetUnitProperties failed: %v", err) + } + } + if setErr != nil { + return setErr } - - // Reset freezer state before we apply the configuration, to avoid clashing - // with the freezer setting in the configuration. - _ = m.Freeze(targetFreezerState) for _, sys := range legacySubsystems { // Get the subsystem path, but don't error out for not found cgroups. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go index 8abb0feb7483..7bacab799763 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go @@ -172,7 +172,7 @@ func genV2ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]syst // aren't the end of the world, but it is a bit concerning. However // it's unclear if systemd removes all eBPF programs attached when // doing SetUnitProperties... - deviceProperties, err := generateDeviceProperties(r.Devices) + deviceProperties, err := generateDeviceProperties(r) if err != nil { return nil, err } @@ -418,38 +418,10 @@ func (m *unifiedManager) Set(r *configs.Resources) error { return err } - // We have to freeze the container while systemd sets the cgroup settings. - // The reason for this is that systemd's application of DeviceAllow rules - // is done disruptively, resulting in spurrious errors to common devices - // (unlike our fs driver, they will happily write deny-all rules to running - // containers). So we freeze the container to avoid them hitting the cgroup - // error. But if the freezer cgroup isn't supported, we just warn about it. - targetFreezerState := configs.Undefined - if !m.cgroups.SkipDevices { - // Figure out the current freezer state, so we can revert to it after we - // temporarily freeze the container. 
- targetFreezerState, err = m.GetFreezerState() - if err != nil { - return err - } - if targetFreezerState == configs.Undefined { - targetFreezerState = configs.Thawed - } - - if err := m.Freeze(configs.Frozen); err != nil { - logrus.Infof("freeze container before SetUnitProperties failed: %v", err) - } - } - if err := setUnitProperties(m.dbus, getUnitName(m.cgroups), properties...); err != nil { - _ = m.Freeze(targetFreezerState) return errors.Wrap(err, "error while setting unit properties") } - // Reset freezer state before we apply the configuration, to avoid clashing - // with the freezer setting in the configuration. - _ = m.Freeze(targetFreezerState) - fsMgr, err := m.fsManager() if err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index 87d0da842881..43868285821f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -127,8 +127,20 @@ type Resources struct { // SkipDevices allows to skip configuring device permissions. // Used by e.g. kubelet while creating a parent cgroup (kubepods) - // common for many containers. + // common for many containers, and by runc update. // // NOTE it is impossible to start a container which has this flag set. - SkipDevices bool `json:"skip_devices"` + SkipDevices bool `json:"-"` + + // SkipFreezeOnSet is a flag for cgroup manager to skip the cgroup + // freeze when setting resources. Only applicable to systemd legacy + // (i.e. cgroup v1) manager (which uses freeze by default to avoid + // spurious permission errors caused by systemd inability to update + // device rules in a non-disruptive manner). 
+ // + // If not set, a few methods (such as looking into cgroup's + // devices.list and querying the systemd unit properties) are used + // during Set() to figure out whether the freeze is required. Those + // methods may be relatively slow, thus this flag. + SkipFreezeOnSet bool `json:"-"` } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go index 9c9cbd120aa1..0ac7d819e6d3 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/doc.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/doc.go @@ -1,10 +1,6 @@ /* Package selinux provides a high-level interface for interacting with selinux. -This package uses a selinux build tag to enable the selinux functionality. This -allows non-linux and linux users who do not have selinux support to still use -tools that rely on this library. - Usage: import "github.com/opencontainers/selinux/go-selinux" diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go index 43945551172b..b3d142d8c5bf 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go @@ -25,6 +25,8 @@ var ErrIncompatibleLabel = errors.New("Bad SELinux option z and Z can not be use // the container. A list of options can be passed into this function to alter // the labels. The labels returned will include a random MCS String, that is // guaranteed to be unique. +// If the disabled flag is passed in, the process label will not be set, but the mount label will be set +// to the container_file label with the maximum category. This label is not usable by any confined label. 
func InitLabels(options []string) (plabel string, mlabel string, retErr error) { if !selinux.GetEnabled() { return "", "", nil @@ -47,7 +49,8 @@ func InitLabels(options []string) (plabel string, mlabel string, retErr error) { } for _, opt := range options { if opt == "disable" { - return "", mountLabel, nil + selinux.ReleaseLabel(mountLabel) + return "", selinux.PrivContainerMountLabel(), nil } if i := strings.Index(opt, ":"); i == -1 { return "", "", errors.Errorf("Bad label option %q, valid options 'disable' or \n'user, role, level, type, filetype' followed by ':' and a value", opt) diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index d9119908b782..b336ebad3abb 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -11,9 +11,10 @@ const ( Permissive = 0 // Disabled constant to indicate SELinux is disabled Disabled = -1 - + // maxCategory is the maximum number of categories used within containers + maxCategory = 1024 // DefaultCategoryRange is the upper bound on the category range - DefaultCategoryRange = uint32(1024) + DefaultCategoryRange = uint32(maxCategory) ) var ( @@ -276,3 +277,8 @@ func DisableSecOpt() []string { func GetDefaultContextWithLevel(user, level, scon string) (string, error) { return getDefaultContextWithLevel(user, level, scon) } + +// PrivContainerMountLabel returns mount label for privileged containers +func PrivContainerMountLabel() string { + return privContainerMountLabel +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 5bfcc0490269..a91a116f8481 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -16,9 +16,9 @@ import ( "strings" "sync" + 
"github.com/bits-and-blooms/bitset" "github.com/opencontainers/selinux/pkg/pwalk" "github.com/pkg/errors" - "github.com/willf/bitset" "golang.org/x/sys/unix" ) @@ -892,13 +892,13 @@ func openContextFile() (*os.File, error) { return os.Open(lxcPath) } -var labels = loadLabels() +var labels, privContainerMountLabel = loadLabels() -func loadLabels() map[string]string { +func loadLabels() (map[string]string, string) { labels := make(map[string]string) in, err := openContextFile() if err != nil { - return labels + return labels, "" } defer in.Close() @@ -920,7 +920,10 @@ func loadLabels() map[string]string { } } - return labels + con, _ := NewContext(labels["file"]) + con["level"] = fmt.Sprintf("s0:c%d,c%d", maxCategory-2, maxCategory-1) + reserveLabel(con.get()) + return labels, con.get() } // kvmContainerLabels returns the default processLabel and mountLabel to be used diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 70b7b7c8519d..b7218a0b6a85 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -2,6 +2,8 @@ package selinux +const privContainerMountLabel = "" + func setDisabled() { } diff --git a/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml b/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml index 3b15501e0fa4..bb4bf8d17e24 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml +++ b/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/897 include.release.openshift.io/self-managed-high-availability: "true" 
include.release.openshift.io/single-node-developer: "true" name: apirequestcounts.apiserver.openshift.io diff --git a/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml index ce911a84d42d..57ec1b33aad7 100644 --- a/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml +++ b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml @@ -1,23 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: rolebindingrestrictions.authorization.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: rolebindingrestrictions.authorization.openshift.io spec: group: authorization.openshift.io - scope: Namespaced names: kind: RoleBindingRestriction listKind: RoleBindingRestrictionList plural: rolebindingrestrictions singular: rolebindingrestriction + scope: Namespaced versions: - name: v1 - served: true - storage: true schema: openAPIV3Schema: description: RoleBindingRestriction is an object that can be matched against @@ -208,3 +207,5 @@ spec: items: type: string nullable: true + served: true + storage: true diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index 8f1154f1e289..310d678c1092 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -733,6 +733,8 @@ message DockerBuildStrategy { // buildArgs contains build arguments that will be resolved in the 
Dockerfile. See // https://docs.docker.com/engine/reference/builder/#/arg for more details. + // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field + // are ignored. repeated k8s.io.api.core.v1.EnvVar buildArgs = 7; // imageOptimizationPolicy describes what optimizations the system can use when building images diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index 11cce8db9c23..9f9dc8f852ae 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -730,6 +730,8 @@ type DockerBuildStrategy struct { // buildArgs contains build arguments that will be resolved in the Dockerfile. See // https://docs.docker.com/engine/reference/builder/#/arg for more details. + // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field + // are ignored. BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"` // imageOptimizationPolicy describes what optimizations the system can use when building images diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go index 9be845dfb097..6d4dcd281eee 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -350,7 +350,7 @@ var map_DockerBuildStrategy = map[string]string{ "env": "env contains additional environment variables you want to pass into a builder container.", "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.", "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir). 
Defaults to `Dockerfile` if unset.", - "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details.", + "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details. NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field are ignored.", "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid commiting new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.", } diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml index 14588cba4ce3..7a244e81f53a 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml @@ -1,6 +1,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/859 name: cloudprivateipconfigs.cloud.network.openshift.io spec: group: cloud.network.openshift.io diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml 
b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml index e56801b1e7aa..7e265559eea6 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -1,47 +1,44 @@ -kind: CustomResourceDefinition apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: clusteroperators.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: clusteroperators.config.openshift.io spec: group: config.openshift.io names: kind: ClusterOperator listKind: ClusterOperatorList plural: clusteroperators - singular: clusteroperator shortNames: - co + singular: clusteroperator scope: Cluster versions: - - name: v1 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .status.versions[?(@.name=="operator")].version - description: The version the operator is at. + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version name: Version type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - description: Whether the operator is running and stable. + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status name: Available type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - description: Whether the operator is processing changes. + - description: Whether the operator is processing changes. 
+ jsonPath: .status.conditions[?(@.type=="Progressing")].status name: Progressing type: string - - jsonPath: .status.conditions[?(@.type=="Degraded")].status - description: Whether the operator is degraded. + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status name: Degraded type: string - - jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime - description: The time the operator's Available status last changed. + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime name: Since type: date + name: v1 schema: openAPIV3Schema: description: ClusterOperator is the Custom Resource object which holds the @@ -162,3 +159,7 @@ spec: the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0 type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index c5be735b6a97..fbc45d362883 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -1,17 +1,36 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: clusterversions.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: clusterversions.config.openshift.io spec: group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion scope: Cluster 
versions: - - name: v1 - served: true - storage: true + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 schema: openAPIV3Schema: description: ClusterVersion is the configuration for the ClusterVersionOperator. @@ -310,25 +329,7 @@ spec: cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. type: string + served: true + storage: true subresources: status: {} - additionalPrinterColumns: - - name: Version - type: string - jsonPath: .status.history[?(@.state=="Completed")].version - - name: Available - type: string - jsonPath: .status.conditions[?(@.type=="Available")].status - - name: Progressing - type: string - jsonPath: .status.conditions[?(@.type=="Progressing")].status - - name: Since - type: date - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - - name: Status - type: string - jsonPath: .status.conditions[?(@.type=="Progressing")].message - names: - plural: clusterversions - singular: clusterversion - kind: ClusterVersion diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml index 8ea625945ac9..dee8a50130f7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: 
CustomResourceDefinition metadata: - name: operatorhubs.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: operatorhubs.config.openshift.io spec: group: config.openshift.io names: @@ -16,12 +17,8 @@ spec: scope: Cluster versions: - name: v1 - subresources: - status: {} - served: true - storage: true - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa. @@ -103,3 +100,7 @@ spec: description: status indicates success or failure in applying the configuration type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml index ddd5d700de2b..ab7307a45a51 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: proxies.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: proxies.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Proxy listKind: ProxyList plural: proxies singular: proxy + scope: Cluster versions: - name: v1 - served: true - 
storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Proxy holds cluster-wide information on how to configure default @@ -101,3 +98,7 @@ spec: description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml index bd730570caa2..a5859e68b3ef 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -1,27 +1,24 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: apiservers.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: apiservers.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: APIServer - singular: apiserver - plural: apiservers listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: - "openAPIV3Schema": + openAPIV3Schema: description: APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of @@ -165,9 +162,9 @@ spec: tlsSecurityProfile: description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. 
Note that only Old and Intermediate - profiles are currently supported, and the maximum available MinTLSVersions - is VersionTLS12." + change between releases) is chosen. Note that only Old, Intermediate + and Custom profiles are currently supported, and the maximum available + MinTLSVersions is VersionTLS12." type: object properties: custom: @@ -258,3 +255,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml index b90d578f3e91..910a4c65b120 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: authentications.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: authentications.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Authentication listKind: AuthenticationList plural: authentications singular: authentication + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Authentication specifies cluster-wide settings for authentication @@ -159,3 +156,7 @@ spec: description: name is the metadata.name of the referenced config map type: string + served: true + storage: true + subresources: + status: {} diff --git 
a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml index fd0eea93c3f6..5c67235fe9cf 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -1,28 +1,25 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: builds.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: builds.config.openshift.io spec: group: config.openshift.io - scope: Cluster - preserveUnknownFields: false names: kind: Build - singular: build - plural: builds listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster versions: - name: v1 - subresources: - status: {} - served: true - storage: true - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. \n The canonical @@ -398,3 +395,7 @@ spec: to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml index d7084ba8f792..2e3a826ef690 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoles.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoles.config.openshift.io spec: - scope: Cluster group: config.openshift.io names: kind: Console listKind: ConsoleList plural: consoles singular: console + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Console holds cluster-wide configuration for the web console, @@ -71,3 +68,7 @@ spec: description: The URL for the console. This will be derived from the host for the route that is created for the console. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml index c05562e64ac4..7550f6e28b60 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: dnses.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: dnses.config.openshift.io spec: group: config.openshift.io names: @@ -16,12 +17,8 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: DNS holds cluster-wide information about DNS. The canonical name is `cluster` type: object @@ -101,3 +98,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. 
type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml index 8bba554b462b..10f85be09637 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: featuregates.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: featuregates.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: FeatureGate listKind: FeatureGateList plural: featuregates singular: featuregate + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Feature holds cluster-wide information about feature gates. The @@ -76,3 +73,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. 
type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml index daed0de9fed4..71ddd49afc10 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: images.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: images.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Image - singular: image - plural: images listKind: ImageList + plural: images + singular: image + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Image governs policies related to imagestream imports and runtime @@ -159,3 +156,7 @@ spec: can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml index d8623cd85fd0..9205a4347bb1 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: infrastructures.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: infrastructures.config.openshift.io spec: group: config.openshift.io names: @@ -16,10 +17,6 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Infrastructure holds cluster-wide information about Infrastructure. The @@ -539,3 +536,7 @@ spec: DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml index 7c1b4f6d7b4d..1a7c294c0e13 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: ingresses.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: ingresses.config.openshift.io spec: group: config.openshift.io names: @@ -16,12 +17,8 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`. type: object @@ -297,3 +294,7 @@ spec: resource: description: resource of the referent. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml index 10eb476ede9b..ae5b3a7332f0 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: networks.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: networks.config.openshift.io spec: group: config.openshift.io names: @@ -13,14 +14,12 @@ spec: listKind: NetworkList plural: networks singular: network - scope: Cluster preserveUnknownFields: false + scope: Cluster versions: - name: v1 - served: true - storage: true - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: 'Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. 
Please @@ -170,3 +169,5 @@ spec: type: array items: type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml index d3097b8745d8..cb4e6d6c5530 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: oauths.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: OAuth listKind: OAuthList plural: oauths singular: oauth + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: OAuth holds cluster-wide information about OAuth. The canonical @@ -674,3 +671,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. 
type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml index 6de30407222e..0e433c57df1f 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: projects.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: projects.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Project listKind: ProjectList plural: projects singular: project + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Project holds cluster-wide information about Project. The canonical @@ -64,3 +61,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. 
type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml index c66ec6ad953a..41663c4e43c4 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: schedulers.config.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: schedulers.config.openshift.io spec: group: config.openshift.io - scope: Cluster names: kind: Scheduler - singular: scheduler - plural: schedulers listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Scheduler holds cluster-wide config information to run the Kubernetes @@ -104,3 +101,7 @@ spec: description: status holds observed values from the cluster. They may not be overridden. 
type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 42268db39b53..013105e360ac 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -46,9 +46,9 @@ type APIServerSpec struct { Encryption APIServerEncryption `json:"encryption"` // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. // - // If unset, a default (which may change between releases) is chosen. Note that only Old and - // Intermediate profiles are currently supported, and the maximum available MinTLSVersions - // is VersionTLS12. + // If unset, a default (which may change between releases) is chosen. Note that only Old, + // Intermediate and Custom profiles are currently supported, and the maximum available + // MinTLSVersions is VersionTLS12. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 22de664b22a9..bc552a7cc490 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -284,7 +284,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. 
You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. 
Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consoleclidownload.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consoleclidownload.crd.yaml index f742f4b91a4d..48cccf39ae67 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consoleclidownload.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consoleclidownload.crd.yaml @@ -1,35 +1,32 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoleclidownloads.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 description: Extension for configuring openshift web console command line interface (CLI) downloads. displayName: ConsoleCLIDownload include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoleclidownloads.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consoleclidownloads - singular: consoleclidownload kind: ConsoleCLIDownload listKind: ConsoleCLIDownloadList + plural: consoleclidownloads + singular: consoleclidownload + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Display name + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: Display name type: string - jsonPath: .spec.displayName - - name: Age + - jsonPath: .metadata.creationTimestamp + name: Age type: string - jsonPath: .metadata.creationTimestamp - subresources: - status: {} + name: v1 schema: openAPIV3Schema: description: ConsoleCLIDownload is an extension for configuring openshift @@ 
-82,3 +79,7 @@ spec: text: description: text is the display text for the link type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consoleexternalloglink.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consoleexternalloglink.crd.yaml index 74330e0fa26e..46d34a4ef9c0 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consoleexternalloglink.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consoleexternalloglink.crd.yaml @@ -1,38 +1,35 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoleexternalloglinks.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 description: ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. displayName: ConsoleExternalLogLinks include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoleexternalloglinks.console.openshift.io spec: - scope: Cluster + group: console.openshift.io names: - plural: consoleexternalloglinks - singular: consoleexternalloglink kind: ConsoleExternalLogLink listKind: ConsoleExternalLogLinkList - group: console.openshift.io + plural: consoleexternalloglinks + singular: consoleexternalloglink + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Text + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text type: string - jsonPath: .spec.text - - name: HrefTemplate + - jsonPath: .spec.hrefTemplate + name: HrefTemplate type: string - jsonPath: .spec.hrefTemplate - - name: Age + - jsonPath: .metadata.creationTimestamp + name: Age type: date - jsonPath: .metadata.creationTimestamp - subresources: - status: {} + name: v1 schema: openAPIV3Schema: description: 
ConsoleExternalLogLink is an extension for customizing OpenShift @@ -87,3 +84,7 @@ spec: text: description: text is the display text for the link type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consolelink.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consolelink.crd.yaml index 435b766a3cbf..b8f6e2bfed38 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consolelink.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consolelink.crd.yaml @@ -1,40 +1,37 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consolelinks.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 description: Extension for customizing OpenShift web console links displayName: ConsoleLinks include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consolelinks.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consolelinks - singular: consolelink kind: ConsoleLink listKind: ConsoleLinkList + plural: consolelinks + singular: consolelink + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Text + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text type: string - jsonPath: .spec.text - - name: URL + - jsonPath: .spec.href + name: URL type: string - jsonPath: .spec.href - - name: Menu + - jsonPath: .spec.menu + name: Menu type: string - jsonPath: .spec.menu - - name: Age + - jsonPath: .metadata.creationTimestamp + name: Age type: date - jsonPath: .metadata.creationTimestamp - subresources: - status: {} + name: v1 schema: openAPIV3Schema: description: ConsoleLink is an extension for customizing OpenShift web console @@ -156,3 +153,7 @@ spec: text: 
description: text is the display text for the link type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consolenotification.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consolenotification.crd.yaml index 94f954dc82cb..9048df7cbd4a 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consolenotification.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consolenotification.crd.yaml @@ -1,37 +1,34 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consolenotifications.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 description: Extension for configuring openshift web console notifications. displayName: ConsoleNotification include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consolenotifications.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consolenotifications - singular: consolenotification kind: ConsoleNotification listKind: ConsoleNotificationList + plural: consolenotifications + singular: consolenotification + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Text + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text type: string - jsonPath: .spec.text - - name: Location + - jsonPath: .spec.location + name: Location type: string - jsonPath: .spec.location - - name: Age + - jsonPath: .metadata.creationTimestamp + name: Age type: date - jsonPath: .metadata.creationTimestamp - subresources: - status: {} + name: v1 schema: openAPIV3Schema: description: ConsoleNotification is the extension for configuring openshift @@ -90,3 +87,7 @@ spec: text: description: text is the visible text of the notification. 
type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consolequickstart.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consolequickstart.crd.yaml index d50313337054..9b3e45107c97 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consolequickstart.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consolequickstart.crd.yaml @@ -1,26 +1,25 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consolequickstarts.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/750 description: Extension for guiding user through various workflows in the OpenShift web console. displayName: ConsoleQuickStart include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consolequickstarts.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consolequickstarts - singular: consolequickstart kind: ConsoleQuickStart listKind: ConsoleQuickStartList + plural: consolequickstarts + singular: consolequickstart + scope: Cluster versions: - name: v1 - served: true - storage: true schema: openAPIV3Schema: description: ConsoleQuickStart is an extension for guiding user through various @@ -201,3 +200,5 @@ spec: step heading. 
type: string minLength: 1 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_consoleyamlsample.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_consoleyamlsample.crd.yaml index ad98acb432db..39c38ed4cef2 100644 --- a/vendor/github.com/openshift/api/console/v1/0000_10_consoleyamlsample.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/0000_10_consoleyamlsample.crd.yaml @@ -1,25 +1,24 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoleyamlsamples.console.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 description: Extension for configuring openshift web console YAML samples. displayName: ConsoleYAMLSample include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoleyamlsamples.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consoleyamlsamples - singular: consoleyamlsample kind: ConsoleYAMLSample listKind: ConsoleYAMLSampleList + plural: consoleyamlsamples + singular: consoleyamlsample + scope: Cluster versions: - name: v1 - served: true - storage: true schema: openAPIV3Schema: description: ConsoleYAMLSample is an extension for customizing OpenShift web @@ -86,3 +85,5 @@ spec: description: yaml is the YAML sample to display. 
type: string pattern: ^(.|\s)*\S(.|\s)*$ + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1alpha1/0000_10_consoleplugin.crd.yaml b/vendor/github.com/openshift/api/console/v1alpha1/0000_10_consoleplugin.crd.yaml index a853e46c49a3..fb5cda41c2fd 100644 --- a/vendor/github.com/openshift/api/console/v1alpha1/0000_10_consoleplugin.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1alpha1/0000_10_consoleplugin.crd.yaml @@ -1,23 +1,23 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoleplugins.console.openshift.io annotations: - include.release.openshift.io/self-managed-high-availability: "true" - displayName: ConsolePlugin + api-approved.openshift.io: https://github.com/openshift/api/pull/764 description: Extension for configuring openshift web console plugins. + displayName: ConsolePlugin + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoleplugins.console.openshift.io spec: - scope: Cluster group: console.openshift.io names: - plural: consoleplugins - singular: consoleplugin kind: ConsolePlugin listKind: ConsolePluginList + plural: consoleplugins + singular: consoleplugin + scope: Cluster versions: - name: v1alpha1 - served: true - storage: true schema: openAPIV3Schema: description: ConsolePlugin is an extension for customizing OpenShift web console @@ -78,3 +78,5 @@ spec: is listening to. 
type: integer format: int32 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml index ac9d2823c749..eba0d7b66564 100644 --- a/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml +++ b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: helmchartrepositories.helm.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/598 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: helmchartrepositories.helm.openshift.io spec: - scope: Cluster group: helm.openshift.io names: kind: HelmChartRepository listKind: HelmChartRepositoryList plural: helmchartrepositories singular: helmchartrepository + scope: Cluster versions: - name: v1beta1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: HelmChartRepository holds cluster-wide configuration for proxied @@ -171,3 +168,7 @@ spec: type: string maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml index 191ca961fa3c..8b91dfefb7c8 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/00-crd.yaml @@ -1,22 +1,24 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: configs.imageregistry.operator.openshift.io 
annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/519 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: configs.imageregistry.operator.openshift.io spec: group: imageregistry.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: Config is the configuration object for a registry instance managed by the registry operator type: object @@ -1535,8 +1537,7 @@ spec: version: description: version is the level this availability applies to type: string - names: - kind: Config - listKind: ConfigList - plural: configs - singular: config + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml index 8cbe097f6084..1677610306ba 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/01-crd.yaml @@ -1,22 +1,24 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: imagepruners.imageregistry.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/555 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: imagepruners.imageregistry.operator.openshift.io spec: group: imageregistry.operator.openshift.io + names: + kind: ImagePruner + listKind: ImagePrunerList + plural: imagepruners + singular: imagepruner scope: Cluster versions: - name: v1 - served: true - storage: true - 
subresources: - status: {} - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: ImagePruner is the configuration object for an image registry pruner managed by the registry operator. type: object @@ -1021,8 +1023,7 @@ spec: has been applied. type: integer format: int64 - names: - kind: ImagePruner - listKind: ImagePrunerList - plural: imagepruners - singular: imagepruner + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index 00ec821f84db..926cca048792 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -20,6 +20,7 @@ import ( kextensionsv1beta1 "k8s.io/api/extensions/v1beta1" kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" knetworkingv1 "k8s.io/api/networking/v1" + kpolicyv1 "k8s.io/api/policy/v1" kpolicyv1beta1 "k8s.io/api/policy/v1beta1" krbacv1 "k8s.io/api/rbac/v1" krbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -112,6 +113,7 @@ var ( kextensionsv1beta1.AddToScheme, kimagepolicyv1alpha1.AddToScheme, knetworkingv1.AddToScheme, + kpolicyv1.AddToScheme, kpolicyv1beta1.AddToScheme, krbacv1.AddToScheme, krbacv1beta1.AddToScheme, diff --git a/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml index aa8c848749cd..b0ed41debf5a 100644 --- a/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml +++ b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml @@ -1,6 +1,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 name: clusternetworks.network.openshift.io spec: group: network.openshift.io @@ -11,22 +13,20 @@ spec: singular: clusternetwork scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - 
name: Cluster Network - type: string - description: The primary cluster network CIDR + - additionalPrinterColumns: + - description: The primary cluster network CIDR jsonPath: .network - - name: Service Network + name: Cluster Network type: string - description: The service network CIDR + - description: The service network CIDR jsonPath: .serviceNetwork - - name: Plugin Name + name: Service Network type: string - description: The OpenShift SDN network plug-in in use + - description: The OpenShift SDN network plug-in in use jsonPath: .pluginName + name: Plugin Name + type: string + name: v1 schema: openAPIV3Schema: description: ClusterNetwork describes the cluster network. There is normally @@ -118,6 +118,8 @@ spec: format: int32 maximum: 65535 minimum: 1 + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml index c101d06f9141..a30fdc2db426 100644 --- a/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml +++ b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml @@ -1,6 +1,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 name: hostsubnets.network.openshift.io spec: group: network.openshift.io @@ -11,32 +13,30 @@ spec: singular: hostsubnet scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Host - type: string - description: The name of the node + - additionalPrinterColumns: + - description: The name of the node jsonPath: .host - - name: Host IP + name: Host type: string - description: The IP address to be used as a VTEP by other nodes in the overlay + - description: The IP address to be used as a VTEP by other nodes in the overlay network jsonPath: .hostIP - - name: Subnet + name: Host IP type: string - 
description: The CIDR range of the overlay network assigned to the node for + - description: The CIDR range of the overlay network assigned to the node for its pods jsonPath: .subnet - - name: Egress CIDRs + name: Subnet type: string - description: The network egress CIDRs + - description: The network egress CIDRs jsonPath: .egressCIDRs - - name: Egress IPs + name: Egress CIDRs type: string - description: The network egress IP addresses + - description: The network egress IP addresses jsonPath: .egressIPs + name: Egress IPs + type: string + name: v1 schema: openAPIV3Schema: description: HostSubnet describes the container subnet network on a node. @@ -96,6 +96,8 @@ spec: to the node for its pods type: string pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml index 42229763689e..ae8e076a6139 100644 --- a/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml +++ b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml @@ -1,6 +1,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 name: netnamespaces.network.openshift.io spec: group: network.openshift.io @@ -11,18 +13,16 @@ spec: singular: netnamespace scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: NetID - type: integer - description: The network identifier of the network namespace + - additionalPrinterColumns: + - description: The network identifier of the network namespace jsonPath: .netid - - name: Egress IPs - type: string - description: The network egress IP addresses + name: NetID + type: integer + - 
description: The network egress IP addresses jsonPath: .egressIPs + name: Egress IPs + type: string + name: v1 schema: openAPIV3Schema: description: NetNamespace describes a single isolated network. When using @@ -70,6 +70,8 @@ spec: same as the object's name, but both fields must be set.) type: string pattern: ^[a-z0-9.-]+$ + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml index 26bd4df1b65f..66fc1ae2d116 100644 --- a/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml +++ b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml @@ -1,6 +1,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 name: egressnetworkpolicies.network.openshift.io spec: group: network.openshift.io @@ -12,10 +14,8 @@ spec: scope: Namespaced versions: - name: v1 - served: true - storage: true - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: EgressNetworkPolicy describes the current egress network policy for a Namespace. 
When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be @@ -78,6 +78,8 @@ spec: description: type marks this as an "Allow" or "Deny" rule type: string pattern: ^Allow|Deny$ + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml index 6d7678ff681c..f1cceeaf9ee7 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml +++ b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/851 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" creationTimestamp: null @@ -72,9 +73,6 @@ spec: be automatically determined. Can be IPv4 or IPv6. type: string ip: - anyOf: - - format: ipv4 - - format: ipv6 description: IP is the address to configure on the router's interface. Can be IPv4 or IPv6. 
type: string diff --git a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch index 0598dbd58d2d..3f1cc0342b0f 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch +++ b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch @@ -1,8 +1,3 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/addresses/items/properties/ip/anyOf - value: - - format: ipv4 - - format: ipv6 - op: add path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/addresses/items/properties/gateway/anyOf value: diff --git a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml index 525b81c0e79c..195e3e065d90 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml @@ -1,26 +1,23 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: configs.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/612 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: configs.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: + categories: + - coreoperators kind: Config plural: configs singular: config - categories: - - coreoperators + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Config provides information to configure the config operator. 
@@ -162,3 +159,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml index d57ec9e35a5b..565ac244014f 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml @@ -1,26 +1,23 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: etcds.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/752 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: etcds.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: + categories: + - coreoperators kind: Etcd plural: etcds singular: etcd - categories: - - coreoperators + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Etcd provides information to configure an operator to manage @@ -228,3 +225,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml index 1f37dcc5e5ce..63ba8cdded37 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml @@ -2,6 
+2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml index 78bb0b7b1f42..457d036ab634 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" @@ -105,6 +106,15 @@ spec: nullable: true type: object x-kubernetes-preserve-unknown-fields: true + useMoreSecureServiceCA: + default: false + description: useMoreSecureServiceCA indicates that the service-ca.crt + provided in SA token volumes should include only enough certificates + to validate service serving certificates. Once set to true, it cannot + be set to false. Even if someone finds a way to set it back to false, + the service-ca.crt files that previously existed will only have + the more secure content. 
+ type: boolean type: object status: description: status is the most recently observed status of the Kubernetes diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml index a72704706933..ea2329342b6c 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml index ad20dab67d2b..6826d2c6a328 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml @@ -1,26 +1,23 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: openshiftapiservers.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: openshiftapiservers.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: + categories: + - coreoperators kind: OpenShiftAPIServer plural: openshiftapiservers singular: 
openshiftapiserver - categories: - - coreoperators + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: OpenShiftAPIServer provides information to configure an operator @@ -168,3 +165,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml index ef2ec14c8cb6..d7c932b6f7f4 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml @@ -1,24 +1,21 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: cloudcredentials.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/692 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: cloudcredentials.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: kind: CloudCredential listKind: CloudCredentialList plural: cloudcredentials singular: cloudcredential + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: CloudCredential provides a means to configure an operator to @@ -176,3 +173,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml 
b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml index bb616a307d66..f6cb9db1b02d 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kubestorageversionmigrators.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/503 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: kubestorageversionmigrators.operator.openshift.io spec: group: operator.openshift.io names: @@ -16,12 +17,8 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: - "openAPIV3Schema": + openAPIV3Schema: description: KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. 
type: object @@ -157,3 +154,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml index 964793f34dd3..c15d59480f21 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml @@ -1,23 +1,20 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: authentications.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: authentications.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: kind: Authentication plural: authentications singular: authentication + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Authentication provides information to configure an operator @@ -166,3 +163,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml index 7dc44d28b4ed..0994b896bca0 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml +++ 
b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml @@ -1,26 +1,23 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: openshiftcontrollermanagers.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: openshiftcontrollermanagers.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: + categories: + - coreoperators kind: OpenShiftControllerManager plural: openshiftcontrollermanagers singular: openshiftcontrollermanager - categories: - - coreoperators + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: OpenShiftControllerManager provides information to configure @@ -158,3 +155,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml index 18926b9d4b51..7ae9984e4f30 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: storages.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/670 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" 
include.release.openshift.io/single-node-developer: "true" + name: storages.operator.openshift.io spec: group: operator.openshift.io names: @@ -15,10 +16,6 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Storage provides a means to configure an operator to manage the @@ -159,3 +156,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml index eaaecf0b237d..a34f3452490c 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/616 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml index 40acfb0810f3..c7951a7c57b9 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: servicecas.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 
include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: servicecas.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: kind: ServiceCA listKind: ServiceCAList plural: servicecas singular: serviceca + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: ServiceCA provides information to configure an operator to manage @@ -160,3 +157,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml index 2cd8df93eb52..3bb668193233 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml @@ -1,10 +1,11 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: networks.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: networks.operator.openshift.io spec: group: operator.openshift.io names: @@ -15,8 +16,6 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true schema: openAPIV3Schema: description: Network describes the cluster's desired network configuration. 
@@ -423,7 +422,7 @@ spec: minItems: 1 items: type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ netFlow: description: netFlow defines the NetFlow configuration. type: object @@ -437,7 +436,7 @@ spec: minItems: 1 items: type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ sFlow: description: sFlow defines the SFlow configuration. type: object @@ -450,7 +449,7 @@ spec: minItems: 1 items: type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ kubeProxyConfig: description: kubeProxyConfig lets us configure desired proxy configuration. 
If not specified, sensible defaults will be chosen by OpenShift @@ -623,3 +622,5 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml index 2253aaa42360..4bef02fe2834 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consoles.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/486 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: consoles.operator.openshift.io spec: - scope: Cluster group: operator.openshift.io names: kind: Console listKind: ConsoleList plural: consoles singular: console + scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: Console provides a means to configure an operator to manage the @@ -367,3 +364,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml index 66e595bb8011..b9386b0a4536 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml @@ -3,6 
+3,7 @@ kind: CustomResourceDefinition metadata: name: dnses.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" @@ -71,8 +72,13 @@ spec: type: string tolerations: description: "tolerations is a list of tolerations applied to - DNS pods. \n The default is an empty list. This default is - subject to change. \n See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" + DNS pods. \n If empty, the operator sets a toleration for the + \"node-role.kubernetes.io/master\" taint. This default is subject + to change. Specifying tolerations without including a toleration + for the \"node-role.kubernetes.io/master\" taint may be risky + as it could lead to an outage if all worker nodes become unavailable. + \n Note that the daemon controller adds some tolerations as + well. 
See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" type: array items: description: The pod this Toleration is attached to tolerates diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml index 720253030523..5e4596f018aa 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml @@ -1,11 +1,12 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: csisnapshotcontrollers.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/562 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: csisnapshotcontrollers.operator.openshift.io spec: group: operator.openshift.io names: @@ -15,10 +16,6 @@ spec: scope: Cluster versions: - name: v1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: CSISnapshotController provides a means to configure an operator @@ -159,3 +156,7 @@ spec: version: description: version is the level this availability applies to type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml index ff66f8c2ad47..21c94a013206 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -2,6 +2,7 @@ apiVersion: 
apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index ce4cf32389f6..0f73dc373b5a 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -110,9 +110,14 @@ type DNSNodePlacement struct { // tolerations is a list of tolerations applied to DNS pods. // - // The default is an empty list. This default is subject to change. - // - // See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + // If empty, the DNS operator sets a toleration for the + // "node-role.kubernetes.io/master" taint. This default is subject to + // change. Specifying tolerations without including a toleration for + // the "node-role.kubernetes.io/master" taint may be risky as it could + // lead to an outage if all worker nodes become unavailable. + // + // Note that the daemon controller adds some tolerations as well. 
See + // https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ // // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go index c20ae30ccd7c..29fa32e4f757 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -25,6 +25,14 @@ type KubeControllerManager struct { type KubeControllerManagerSpec struct { StaticPodOperatorSpec `json:",inline"` + + // useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only + // enough certificates to validate service serving certificates. + // Once set to true, it cannot be set to false. + // Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will + // only have the more secure content. 
+ // +kubebuilder:default=false + UseMoreSecureServiceCA bool `json:"useMoreSecureServiceCA"` } type KubeControllerManagerStatus struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 33b23bc8a786..2c15993e3478 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -415,7 +415,7 @@ type IPFIXConfig struct { Collectors []IPPort `json:"collectors,omitempty"` } -// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$` +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` type IPPort string type PolicyAuditConfig struct { diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 0324f679c502..bef28dfdb273 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -396,7 +396,7 @@ func (DNSList) SwaggerDoc() map[string]string { var map_DNSNodePlacement = map[string]string{ "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.", "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", - "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nThe default is an empty list. 
This default is subject to change.\n\nSee https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", + "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", } func (DNSNodePlacement) SwaggerDoc() map[string]string { @@ -793,6 +793,14 @@ func (KubeControllerManagerList) SwaggerDoc() map[string]string { return map_KubeControllerManagerList } +var map_KubeControllerManagerSpec = map[string]string{ + "useMoreSecureServiceCA": "useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. 
Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content.", +} + +func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { + return map_KubeControllerManagerSpec +} + var map_KubeStorageVersionMigrator = map[string]string{ "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.", } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml index 92d1404fcddf..b160c5bace6f 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml @@ -1,25 +1,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: imagecontentsourcepolicies.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: imagecontentsourcepolicies.operator.openshift.io spec: group: operator.openshift.io - scope: Cluster names: kind: ImageContentSourcePolicy - singular: imagecontentsourcepolicy - plural: imagecontentsourcepolicies listKind: ImageContentSourcePolicyList + plural: imagecontentsourcepolicies + singular: imagecontentsourcepolicy + scope: Cluster versions: - name: v1alpha1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: ImageContentSourcePolicy holds cluster-wide information about @@ -91,3 +88,7 @@ spec: description: source is the repository that users refer to, e.g. 
in image pull specifications. type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml index 1009c9162a5a..49bf08eebb6d 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/639 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" name: podnetworkconnectivitychecks.controlplane.operator.openshift.io @@ -15,10 +16,6 @@ spec: scope: Namespaced versions: - name: v1alpha1 - served: true - storage: true - subresources: - status: {} schema: openAPIV3Schema: description: PodNetworkConnectivityCheck @@ -257,3 +254,7 @@ spec: type: string format: date-time nullable: true + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml index 2602365503a7..2176d147755a 100644 --- a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml +++ b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: 
"true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml b/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml index f0bd21edab73..c30ba2482eba 100644 --- a/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml +++ b/vendor/github.com/openshift/api/samples/v1/0000_10_samplesconfig.crd.yaml @@ -1,25 +1,27 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: configs.samples.operator.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/513 description: Extension for configuring openshif samples operator. displayName: ConfigsSamples include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: configs.samples.operator.openshift.io spec: - scope: Cluster - preserveUnknownFields: false group: samples.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + preserveUnknownFields: false + scope: Cluster versions: - name: v1 - subresources: - status: {} - served: true - storage: true - "schema": - "openAPIV3Schema": + schema: + openAPIV3Schema: description: Config contains the configuration and detailed condition status for the Samples Operator. 
type: object @@ -171,8 +173,7 @@ spec: description: version is the value of the operator's payload based version indicator when it was last successfully processed type: string - names: - plural: configs - singular: config - kind: Config - listKind: ConfigList + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml index 480f7b273078..ab2f053030ba 100644 --- a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml +++ b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml @@ -1,61 +1,60 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: securitycontextconstraints.security.openshift.io annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + name: securitycontextconstraints.security.openshift.io spec: group: security.openshift.io - scope: Cluster names: kind: SecurityContextConstraints listKind: SecurityContextConstraintsList plural: securitycontextconstraints singular: securitycontextconstraints + scope: Cluster versions: - - name: v1 - served: true - storage: true - additionalPrinterColumns: - - name: Priv - type: string + - additionalPrinterColumns: + - description: Determines if a container can request to be run as privileged jsonPath: .allowPrivilegedContainer - description: Determines if a container can request to be run as privileged - - name: Caps + name: Priv type: string + - description: A list of capabilities that can be requested to add to the container jsonPath: .allowedCapabilities - description: A list of capabilities that can be requested to add to the 
container - - name: SELinux + name: Caps type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext jsonPath: .seLinuxContext.type - description: Strategy that will dictate what labels will be set in the SecurityContext - - name: RunAsUser + name: SELinux type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext jsonPath: .runAsUser.type - description: Strategy that will dictate what RunAsUser is used in the SecurityContext - - name: FSGroup + name: RunAsUser type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext jsonPath: .fsGroup.type - description: Strategy that will dictate what fs group is used by the SecurityContext - - name: SupGroup + name: FSGroup type: string - jsonPath: .supplementalGroups.type - description: Strategy that will dictate what supplemental groups are used by + - description: Strategy that will dictate what supplemental groups are used by the SecurityContext - - name: Priority + jsonPath: .supplementalGroups.type + name: SupGroup type: string + - description: Sort order of SCCs jsonPath: .priority - description: Sort order of SCCs - - name: ReadOnlyRootFS + name: Priority type: string + - description: Force containers to run with a read only root file system jsonPath: .readOnlyRootFilesystem - description: Force containers to run with a read only root file system - - name: Volumes + name: ReadOnlyRootFS type: string + - description: White list of allowed volume plugins jsonPath: .volumes - description: White list of allowed volume plugins + name: Volumes + type: string + name: v1 schema: openAPIV3Schema: description: SecurityContextConstraints governs the ability to make requests @@ -360,3 +359,5 @@ spec: are used by volumes. 
type: string nullable: true + served: true + storage: true diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go index 8ff6c7d70eec..c30bc9a5d0b7 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/accept.go @@ -27,7 +27,7 @@ type policyDecision struct { resolutionErr error } -func accept(accepter rules.Accepter, policy imageResolutionPolicy, resolver imageResolver, m imagereferencemutators.ImageReferenceMutator, annotations imagereferencemutators.AnnotationAccessor, attr admission.Attributes, excludedRules sets.String) error { +func accept(accepter rules.Accepter, policy imageResolutionPolicy, resolver imageResolver, m imagereferencemutators.ImageReferenceMutator, annotations imagereferencemutators.AnnotationAccessor, attr admission.Attributes, excludedRules sets.String, mutationAllowed bool) error { decisions := policyDecisions{} t := attr.GetResource().GroupResource() @@ -58,13 +58,22 @@ func accept(accepter rules.Accepter, policy imageResolutionPolicy, resolver imag decision.resolutionErr = err case err == nil: + oldDecissionAttributes := decision.attrs // if we resolved properly, assign the attributes and rewrite the pull spec if we need to decision.attrs = resolvedAttrs if policy.RewriteImagePullSpec(resolvedAttrs, attr.GetOperation() == admission.Update, gr) { - ref.Namespace = "" - ref.Name = decision.attrs.Name.Exact() - ref.Kind = "DockerImage" + refUpdate := kapi.ObjectReference{Kind: "DockerImage", Name: resolvedAttrs.Name.Exact()} + + // check if we are mutating object in validate phase and discard the update + // this allows creation of objects like imagestreams in between admit and validate + if !mutationAllowed && (ref.Namespace != refUpdate.Namespace || ref.Name != refUpdate.Name || ref.Kind != 
refUpdate.Kind) { + klog.V(5).Infof("image resolution changed between admit and verify: falling back to the old image attributes (attributes=%#v)", oldDecissionAttributes) + } else { + ref.Namespace = refUpdate.Namespace + ref.Name = refUpdate.Name + ref.Kind = refUpdate.Kind + } } } } diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go index 6c2d1499caf9..b0fbe3a573b4 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1/defaults.go @@ -25,6 +25,7 @@ func SetDefaults_ImagePolicyConfig(obj *ImagePolicyConfig) { obj.ResolutionRules = []ImageResolutionPolicyRule{ {TargetResource: metav1.GroupResource{Group: "", Resource: "pods"}, LocalNames: true}, {TargetResource: metav1.GroupResource{Group: "", Resource: "replicationcontrollers"}, LocalNames: true}, + {TargetResource: metav1.GroupResource{Group: "apps.openshift.io", Resource: "deploymentconfigs"}, LocalNames: true}, {TargetResource: metav1.GroupResource{Group: "apps", Resource: "daemonsets"}, LocalNames: true}, {TargetResource: metav1.GroupResource{Group: "apps", Resource: "deployments"}, LocalNames: true}, {TargetResource: metav1.GroupResource{Group: "apps", Resource: "statefulsets"}, LocalNames: true}, diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go index df97813f821b..855a554a86db 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagepolicy.go @@ -200,6 +200,15 @@ func (a *ImagePolicyPlugin) 
admit(ctx context.Context, attr admission.Attributes return nil } + if obj, ok := attr.GetObject().(metav1.Object); ok { + for _, ownerRef := range obj.GetOwnerReferences() { + if ownerRef.Controller != nil && *ownerRef.Controller { + klog.V(5).Infof("skipping image policy admission for %s:%s/%s, reason: has controller owner reference", attr.GetKind(), attr.GetNamespace(), attr.GetName()) + return nil + } + } + } + klog.V(5).Infof("running image policy admission for %s:%s/%s", attr.GetKind(), attr.GetNamespace(), attr.GetName()) m, err := a.imageMutators.GetImageReferenceMutator(attr.GetObject(), attr.GetOldObject()) if err != nil { @@ -222,7 +231,7 @@ func (a *ImagePolicyPlugin) admit(ctx context.Context, attr admission.Attributes } } - if err := accept(a.accepter, policy, a.resolver, m, annotations, attr, excluded); err != nil { + if err := accept(a.accepter, policy, a.resolver, m, annotations, attr, excluded, mutationAllowed); err != nil { return err } @@ -479,8 +488,6 @@ var skipImageRewriteOnUpdate = map[metav1.GroupResource]struct{}{ {Group: "batch", Resource: "jobs"}: {}, // Build specs are immutable, they cannot be updated. {Group: "build.openshift.io", Resource: "builds"}: {}, - // TODO: remove when statefulsets allow spec.template updates in 3.7 - {Group: "apps", Resource: "statefulsets"}: {}, } // RewriteImagePullSpec applies to implicit rewrite attributes and local resources as well as if the policy requires it. 
diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go index 990fd7a012e9..67c51db8056f 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission/admission.go @@ -8,8 +8,11 @@ import ( "strings" "time" + "k8s.io/apimachinery/pkg/util/sets" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/labels" + kutilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" @@ -131,6 +134,37 @@ func (c *constraint) Validate(ctx context.Context, a admission.Attributes, _ adm return admission.NewForbidden(a, fmt.Errorf("unable to validate against any security context constraint: %v", validationErrs)) } +// these are the SCCs created by the cluster-kube-apiserver-operator. +// see the list in https://github.com/openshift/cluster-kube-apiserver-operator/blob/3b0218cf9778cbcf2650ad5aa4e01d7b40a2d05e/bindata/bootkube/scc-manifests/0000_20_kube-apiserver-operator_00_scc-restricted.yaml +// if these are not present, the lister isn't really finished listing. 
+var standardSCCNames = sets.NewString( + "anyuid", + "hostaccess", + "hostmount-anyuid", + "hostnetwork", + "nonroot", + "privileged", + "restricted", +) + +func requireStandardSCCs(sccs []*securityv1.SecurityContextConstraints, err error) error { + if err != nil { + return err + } + + allCurrentSCCNames := sets.NewString() + for _, curr := range sccs { + allCurrentSCCNames.Insert(curr.Name) + } + + missingSCCs := standardSCCNames.Difference(allCurrentSCCNames) + if len(missingSCCs) == 0 { + return nil + } + + return fmt.Errorf("securitycontextconstraints.security.openshift.io cache is missing %v", strings.Join(missingSCCs.List(), ", ")) +} + func (c *constraint) computeSecurityContext(ctx context.Context, a admission.Attributes, pod *coreapi.Pod, specMutationAllowed bool, validatedSCCHint string) (*coreapi.Pod, string, field.ErrorList, error) { // get all constraints that are usable by the user klog.V(4).Infof("getting security context constraints for pod %s (generate: %s) in namespace %s with user info %v", pod.Name, pod.GenerateName, a.GetNamespace(), a.GetUserInfo()) @@ -142,6 +176,24 @@ func (c *constraint) computeSecurityContext(ctx context.Context, a admission.Att return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("securitycontextconstraints.security.openshift.io cache is not synchronized")) } + // wait a few seconds until the synchronized list returns all the required SCCs created by the kas-o. + // If this doesn't happen, then indicate which ones are missing. This seems odd, but our CI system suggests that this happens occasionally. + // If the SCCs were all deleted, then no pod will pass SCC admission until the SCCs are recreated, but the kas-o (which recreates them) + // bypasses SCC admission, so this does not create a cycle. 
+ var requiredSCCErr error + err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + if requiredSCCErr = requireStandardSCCs(c.sccLister.List(labels.Everything())); requiredSCCErr != nil { + return false, nil + } + return true, nil + }) + if err != nil { + if requiredSCCErr != nil { + return nil, "", nil, admission.NewForbidden(a, requiredSCCErr) + } + return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("securitycontextconstraints.security.openshift.io required check failed oddly")) + } + constraints, err := sccmatching.NewDefaultSCCMatcher(c.sccLister, nil).FindApplicableSCCs(ctx, a.GetNamespace()) if err != nil { return nil, "", nil, admission.NewForbidden(a, err) @@ -171,8 +223,11 @@ func (c *constraint) computeSecurityContext(ctx context.Context, a admission.Att return i < j }) - providers, errs := sccmatching.CreateProvidersFromConstraints(a.GetNamespace(), constraints, c.client) + providers, errs := sccmatching.CreateProvidersFromConstraints(ctx, a.GetNamespace(), constraints, c.client) logProviders(pod, providers, errs) + if len(errs) > 0 { + return nil, "", nil, kutilerrors.NewAggregate(errs) + } if len(providers) == 0 { return nil, "", nil, admission.NewForbidden(a, fmt.Errorf("no SecurityContextConstraintsProvider available to validate pod request")) @@ -388,6 +443,6 @@ func logProviders(pod *coreapi.Pod, providers []sccmatching.SecurityContextConst klog.V(4).Infof("validating pod %s (generate: %s) against providers %s", pod.Name, pod.GenerateName, strings.Join(names, ",")) for _, err := range providerCreationErrs { - klog.V(4).Infof("provider creation error: %v", err) + klog.V(2).Infof("provider creation error: %v", err) } } diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go index 7625979b4a5d..5cce48927a0a 100644 --- 
a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/matcher.go @@ -5,16 +5,17 @@ import ( "fmt" "sort" "strings" - - "k8s.io/klog/v2" + "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" kapi "k8s.io/kubernetes/pkg/apis/core" "github.com/openshift/api/security" @@ -158,17 +159,9 @@ func constraintSupportsGroup(group string, constraintGroups []string) bool { return false } -// getNamespaceByName retrieves a namespace only if ns is nil. -func getNamespaceByName(name string, ns *corev1.Namespace, client kubernetes.Interface) (*corev1.Namespace, error) { - if ns != nil && name == ns.Name { - return ns, nil - } - return client.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) -} - // CreateProvidersFromConstraints creates providers from the constraints supplied, including // looking up pre-allocated values if necessary using the pod's namespace. 
-func CreateProvidersFromConstraints(ns string, sccs []*securityv1.SecurityContextConstraints, client kubernetes.Interface) ([]SecurityContextConstraintsProvider, []error) { +func CreateProvidersFromConstraints(ctx context.Context, namespaceName string, sccs []*securityv1.SecurityContextConstraints, client kubernetes.Interface) ([]SecurityContextConstraintsProvider, []error) { var ( // namespace is declared here for reuse but we will not fetch it unless required by the matched constraints namespace *corev1.Namespace @@ -178,13 +171,39 @@ func CreateProvidersFromConstraints(ns string, sccs []*securityv1.SecurityContex errs []error ) + var lastErr error + err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + namespace, lastErr = client.CoreV1().Namespaces().Get(ctx, namespaceName, metav1.GetOptions{}) + if lastErr != nil { + return false, nil + } + + if _, ok := namespace.GetAnnotations()[securityv1.UIDRangeAnnotation]; !ok { + lastErr = fmt.Errorf("unable to find annotation %s", securityv1.UIDRangeAnnotation) + return false, nil + } + + if _, ok := namespace.GetAnnotations()[securityv1.MCSAnnotation]; !ok { + lastErr = fmt.Errorf("unable to find annotation %s", securityv1.MCSAnnotation) + return false, nil + } + + return true, nil + }) + if err != nil { + if lastErr != nil { + return nil, []error{fmt.Errorf("error fetching namespace %q: %w", namespaceName, lastErr)} + } + return nil, []error{fmt.Errorf("error fetching namespace %q: %w", namespaceName, err)} + } + // set pre-allocated values on constraints for _, constraint := range sccs { var ( provider SecurityContextConstraintsProvider err error ) - provider, namespace, err = CreateProviderFromConstraint(ns, namespace, constraint, client) + provider, err = CreateProviderFromConstraint(namespace, constraint) if err != nil { errs = append(errs, err) continue @@ -195,36 +214,23 @@ func CreateProvidersFromConstraints(ns string, sccs []*securityv1.SecurityContex } // 
CreateProviderFromConstraint creates a SecurityContextConstraintProvider from a SecurityContextConstraint -func CreateProviderFromConstraint(ns string, namespace *corev1.Namespace, constraint *securityv1.SecurityContextConstraints, client kubernetes.Interface) (SecurityContextConstraintsProvider, *corev1.Namespace, error) { +func CreateProviderFromConstraint(namespace *corev1.Namespace, constraint *securityv1.SecurityContextConstraints) (SecurityContextConstraintsProvider, error) { var err error - resolveUIDRange := requiresPreAllocatedUIDRange(constraint) - resolveSELinuxLevel := requiresPreAllocatedSELinuxLevel(constraint) - resolveFSGroup := requiresPreallocatedFSGroup(constraint) - resolveSupplementalGroups := requiresPreallocatedSupplementalGroups(constraint) - requiresNamespaceAllocations := resolveUIDRange || resolveSELinuxLevel || resolveFSGroup || resolveSupplementalGroups - - if requiresNamespaceAllocations { - // Ensure we have the namespace - namespace, err = getNamespaceByName(ns, namespace, client) - if err != nil { - return nil, namespace, fmt.Errorf("error fetching namespace %s required to preallocate values for %s: %v", ns, constraint.Name, err) - } - } // Make a copy of the constraint so we don't mutate the store's cache constraint = constraint.DeepCopy() // Resolve the values from the namespace - if resolveUIDRange { + if requiresPreAllocatedUIDRange(constraint) { constraint.RunAsUser.UIDRangeMin, constraint.RunAsUser.UIDRangeMax, err = getPreallocatedUIDRange(namespace) if err != nil { - return nil, namespace, fmt.Errorf("unable to find pre-allocated uid annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + return nil, fmt.Errorf("unable to find pre-allocated uid annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) } } - if resolveSELinuxLevel { + if requiresPreAllocatedSELinuxLevel(constraint) { var level string if level, err = 
getPreallocatedLevel(namespace); err != nil { - return nil, namespace, fmt.Errorf("unable to find pre-allocated mcs annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + return nil, fmt.Errorf("unable to find pre-allocated mcs annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) } if constraint.SELinuxContext.SELinuxOptions == nil { @@ -232,17 +238,17 @@ func CreateProviderFromConstraint(ns string, namespace *corev1.Namespace, constr } constraint.SELinuxContext.SELinuxOptions.Level = level } - if resolveFSGroup { + if requiresPreallocatedFSGroup(constraint) { fsGroup, err := getPreallocatedFSGroup(namespace) if err != nil { - return nil, namespace, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + return nil, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) } constraint.FSGroup.Ranges = fsGroup } - if resolveSupplementalGroups { + if requiresPreallocatedSupplementalGroups(constraint) { supplementalGroups, err := getPreallocatedSupplementalGroups(namespace) if err != nil { - return nil, namespace, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) + return nil, fmt.Errorf("unable to find pre-allocated group annotation for namespace %s while trying to configure SCC %s: %v", namespace.Name, constraint.Name, err) } constraint.SupplementalGroups.Ranges = supplementalGroups } @@ -250,9 +256,9 @@ func CreateProviderFromConstraint(ns string, namespace *corev1.Namespace, constr // Create the provider provider, err := NewSimpleProvider(constraint) if err != nil { - return nil, namespace, fmt.Errorf("error creating provider for SCC %s in namespace %s: %v", 
constraint.Name, ns, err) + return nil, fmt.Errorf("error creating provider for SCC %s in namespace %s: %v", constraint.Name, namespace.GetName(), err) } - return provider, namespace, nil + return provider, nil } // getPreallocatedUIDRange retrieves the annotated value from the namespace, splits it to make diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go index 10866dc85c6e..6be2e7073a9c 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccmatching/provider.go @@ -11,6 +11,7 @@ import ( "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user" sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl" @@ -273,7 +274,7 @@ func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field continue } - if !allowedVolumes.Has(string(fsType)) { + if !allowsVolumeType(allowedVolumes, fsType, v.VolumeSource) { allErrs = append(allErrs, field.Invalid( field.NewPath("spec", "volumes").Index(i), string(fsType), fmt.Sprintf("%s volumes are not allowed to be used", string(fsType)))) @@ -467,3 +468,21 @@ func createSeccompStrategy(allowedProfiles []string) (seccomp.SeccompStrategy, e func createSysctlsStrategy(safeWhitelist, allowedUnsafeSysctls, forbiddenSysctls []string) (sysctl.SysctlsStrategy, error) { return sysctl.NewMustMatchPatterns(safeWhitelist, allowedUnsafeSysctls, forbiddenSysctls), nil } + +// allowsVolumeType determines whether the type and volume are valid +// given the 
volumes allowed by an scc. +// +// This function was derived from a psp function of the same name in +// pkg/security/podsecuritypolicy/provider.go and updated for scc +// compatibility. +func allowsVolumeType(allowedVolumes sets.String, fsType securityv1.FSType, volumeSource api.VolumeSource) bool { + if allowedVolumes.Has(string(fsType)) { + return true + } + + // If secret volumes are allowed by the scc, allow the projected + // volume sources that bound service account token volumes expose. + return allowedVolumes.Has(string(securityv1.FSTypeSecret)) && + fsType == securityv1.FSProjected && + sccutil.IsOnlyServiceAccountTokenSources(volumeSource.Projected) +} diff --git a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go index 9e4a5cfdbc98..c8bbfec777b5 100644 --- a/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go +++ b/vendor/github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/util.go @@ -176,3 +176,68 @@ func EqualStringSlices(a, b []string) bool { } return true } + +// IsOnlyServiceAccountTokenSources returns true if the sources of the projected volume +// source match to what would be injected by the ServiceAccount volume projection controller +// +// This function is derived from pkg/security/podsecuritypolicy/util/util.go with the +// addition of OpenShift-specific "openshift-service-ca.crt" ConfigMap source. 
+// +// This is what a sample injected volume looks like: +// - projected: +// defaultMode: 420 +// sources: +// - serviceAccountToken: +// expirationSeconds: 3607 +// path: token +// - configMap: +// name: kube-root-ca.crt +// items: +// - key: ca.crt +// path: ca.crt +// - downwardAPI: +// items: +// - path: namespace +// fieldRef: +// apiVersion: v1 +// fieldPath: metadata.namespace +// - configMap: +// name: openshift-service-ca.crt +// items: +// - key: service-ca.crt +// path: service-ca.crt +func IsOnlyServiceAccountTokenSources(v *api.ProjectedVolumeSource) bool { + for _, s := range v.Sources { + // reject any projected source that does not match any of our expected source types + if s.ServiceAccountToken == nil && s.ConfigMap == nil && s.DownwardAPI == nil { + return false + } + if t := s.ServiceAccountToken; t != nil && (t.Path != "token" || t.Audience != "") { + return false + } + + if s.ConfigMap != nil { + switch cmRef := s.ConfigMap.LocalObjectReference.Name; cmRef { + case "kube-root-ca.crt": + if len(s.ConfigMap.Items) != 1 || s.ConfigMap.Items[0].Key != "ca.crt" || s.ConfigMap.Items[0].Path != "ca.crt" { + return false + } + case "openshift-service-ca.crt": + if len(s.ConfigMap.Items) != 1 || s.ConfigMap.Items[0].Key != "service-ca.crt" || s.ConfigMap.Items[0].Path != "service-ca.crt" { + return false + } + default: + return false + } + } + + if s.DownwardAPI != nil { + for _, d := range s.DownwardAPI.Items { + if d.Path != "namespace" || d.FieldRef == nil || d.FieldRef.APIVersion != "v1" || d.FieldRef.FieldPath != "metadata.namespace" { + return false + } + } + } + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go new file mode 100644 index 000000000000..b70da9548195 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go @@ -0,0 +1,26 @@ +package factory + +import ( 
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +func ObjectNameToKey(obj runtime.Object) string { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return "" + } + return metaObj.GetObjectMeta().GetName() +} + +func NamesFilter(names ...string) EventFilterFunc { + nameSet := sets.NewString(names...) + return func(obj interface{}) bool { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return false + } + return nameSet.Has(metaObj.GetObjectMeta().GetName()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go index b13453923e3d..a8ecc375ebe4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -4,6 +4,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -17,6 +18,8 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset" "github.com/openshift/api" "github.com/openshift/library-go/pkg/operator/events" @@ -33,7 +36,9 @@ func init() { utilruntime.Must(api.InstallKube(genericScheme)) utilruntime.Must(apiextensionsv1beta1.AddToScheme(genericScheme)) utilruntime.Must(apiextensionsv1.AddToScheme(genericScheme)) - + utilruntime.Must(migrationv1alpha1.AddToScheme(genericScheme)) + // TODO: remove once openshift/api/pull/929 is merged + 
utilruntime.Must(policyv1.AddToScheme(genericScheme)) } type AssetFunc func(name string) ([]byte, error) @@ -51,6 +56,7 @@ type ClientHolder struct { apiExtensionsClient apiextensionsclient.Interface kubeInformers v1helpers.KubeInformersForNamespaces dynamicClient dynamic.Interface + migrationClient migrationclient.Interface } func NewClientHolder() *ClientHolder { @@ -81,6 +87,11 @@ func (c *ClientHolder) WithDynamicClient(client dynamic.Interface) *ClientHolder return c } +func (c *ClientHolder) WithMigrationClient(client migrationclient.Interface) *ClientHolder { + c.migrationClient = client + return c +} + // ApplyDirectly applies the given manifest files to API server. func ApplyDirectly(clients *ClientHolder, recorder events.Recorder, manifests AssetFunc, files ...string) []ApplyResult { ret := []ApplyResult{} @@ -165,6 +176,12 @@ func ApplyDirectly(clients *ClientHolder, recorder events.Recorder, manifests As } else { result.Result, result.Changed, result.Error = ApplyRoleBinding(clients.kubeClient.RbacV1(), recorder, t) } + case *policyv1.PodDisruptionBudget: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyPodDisruptionBudget(clients.kubeClient.PolicyV1(), recorder, t) + } case *apiextensionsv1beta1.CustomResourceDefinition: if clients.apiExtensionsClient == nil { result.Error = fmt.Errorf("missing apiExtensionsClient") @@ -189,6 +206,12 @@ func ApplyDirectly(clients *ClientHolder, recorder events.Recorder, manifests As } else { result.Result, result.Changed, result.Error = ApplyCSIDriver(clients.kubeClient.StorageV1(), recorder, t) } + case *migrationv1alpha1.StorageVersionMigration: + if clients.migrationClient == nil { + result.Error = fmt.Errorf("missing migrationClient") + } else { + result.Result, result.Changed, result.Error = ApplyStorageVersionMigration(clients.migrationClient, recorder, t) + } case *unstructured.Unstructured: if clients.dynamicClient 
== nil { result.Error = fmt.Errorf("missing dynamicClient") diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go new file mode 100644 index 000000000000..fccd3ff23e29 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go @@ -0,0 +1,46 @@ +package resourceapply + +import ( + "context" + "reflect" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + migrationclientv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset" +) + +// ApplyStorageVersionMigration merges objectmeta and required data. 
+func ApplyStorageVersionMigration(client migrationclientv1alpha1.Interface, recorder events.Recorder, required *migrationv1alpha1.StorageVersionMigration) (*migrationv1alpha1.StorageVersionMigration, bool, error) { + clientInterface := client.MigrationV1alpha1().StorageVersionMigrations() + existing, err := clientInterface.Get(context.Background(), required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := clientInterface.Create(context.Background(), resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*v1alpha1.StorageVersionMigration), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified && reflect.DeepEqual(existingCopy.Spec, required.Spec) { + return existingCopy, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("StorageVersionMigration %q changes: %v", required.Name, JSONPatchNoError(existing, required)) + } + + required.Spec.Resource.DeepCopyInto(&existingCopy.Spec.Resource) + actual, err := clientInterface.Update(context.Background(), existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go new file mode 100644 index 000000000000..ecd0eb6bf8d4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go @@ -0,0 +1,47 @@ +package resourceapply + +import ( + "context" + + policyv1 "k8s.io/api/policy/v1" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + policyclientv1 "k8s.io/client-go/kubernetes/typed/policy/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +func ApplyPodDisruptionBudget(client policyclientv1.PodDisruptionBudgetsGetter, recorder events.Recorder, required *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, bool, error) { + existing, err := client.PodDisruptionBudgets(required.Namespace).Get(context.TODO(), required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.PodDisruptionBudgets(required.Namespace).Create(context.TODO(), required, metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Spec, required.Spec) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Spec = required.Spec + + if klog.V(4).Enabled() { + klog.Infof("PodDisruptionBudget %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.PodDisruptionBudgets(required.Namespace).Update(context.TODO(), existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go new file mode 100644 index 000000000000..71b6074c9231 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + "k8s.io/apimachinery/pkg/runtime" + 
"k8s.io/apimachinery/pkg/runtime/serializer" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" +) + +var ( + migrationScheme = runtime.NewScheme() + migrationCodecs = serializer.NewCodecFactory(migrationScheme) +) + +func init() { + if err := migrationv1alpha1.AddToScheme(migrationScheme); err != nil { + panic(err) + } +} + +func ReadStorageVersionMigrationV1Alpha1OrDie(objBytes []byte) *migrationv1alpha1.StorageVersionMigration { + requiredObj, err := runtime.Decode(migrationCodecs.UniversalDecoder(migrationv1alpha1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*migrationv1alpha1.StorageVersionMigration) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go new file mode 100644 index 000000000000..fe058fdc6e63 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go @@ -0,0 +1,25 @@ +package resourceread + +import ( + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + policyScheme = runtime.NewScheme() + policyCodecs = serializer.NewCodecFactory(policyScheme) +) + +func init() { + utilruntime.Must(policyv1.AddToScheme(policyScheme)) +} + +func ReadPodDisruptionBudgetV1OrDie(objBytes []byte) *policyv1.PodDisruptionBudget { + requiredObj, err := runtime.Decode(policyCodecs.UniversalDecoder(policyv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*policyv1.PodDisruptionBudget) +} diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 5e20aa4140c3..c1dbd5a3a3e2 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -4,14 +4,12 
@@ git: depth: 1 env: - GO111MODULE=on -go: [1.13.x, 1.14.x] -os: [linux, osx] +go: 1.15.x +os: linux install: - ./travis/install.sh script: - - ./travis/cross_build.sh - - ./travis/lint.sh - - export GOMAXPROCS=4 - - export GORACE=halt_on_error=1 - - go test -race -v ./... - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 584026d67caa..7567f612898c 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,39 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + # 1.6.0 Fixes: * end of line cleanup diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 5796706dbfa2..5152b6aa406f 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // source of the official loggers. 
serialized, err := json.Marshal(entry.Data) if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) } return append(serialized, '\n'), nil } diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 5a5cbfe7c897..07a1e5fa7249 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -78,6 +78,14 @@ func NewEntry(logger *Logger) *Entry { } } +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + // Returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) @@ -123,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range fields { isErrField := false if t := reflect.TypeOf(v); t != nil { - switch t.Kind() { - case reflect.Func: + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: isErrField = true - case reflect.Ptr: - isErrField = t.Elem().Kind() == reflect.Func } } if isErrField { @@ -212,68 +218,72 @@ func (entry Entry) HasCaller() (has bool) { entry.Caller != nil } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { +func (entry *Entry) log(level Level, msg string) { var buffer *bytes.Buffer - // Default to now, but allow users to override if they want. - // - // We don't have to worry about polluting future calls to Entry#log() - // with this assignment because this function is declared with a - // non-pointer receiver. 
- if entry.Time.IsZero() { - entry.Time = time.Now() + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() } - entry.Level = level - entry.Message = msg - entry.Logger.mu.Lock() - if entry.Logger.ReportCaller { - entry.Caller = getCaller() + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() } - entry.Logger.mu.Unlock() - entry.fireHooks() + newEntry.fireHooks() buffer = getBuffer() defer func() { - entry.Buffer = nil + newEntry.Buffer = nil putBuffer(buffer) }() buffer.Reset() - entry.Buffer = buffer + newEntry.Buffer = buffer - entry.write() + newEntry.write() - entry.Buffer = nil + newEntry.Buffer = nil // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { - panic(&entry) + panic(newEntry) } } func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, entry) + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - if _, err = entry.Logger.Out.Write(serialized); err != nil { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } @@ -319,7 +329,6 @@ func (entry *Entry) Fatal(args 
...interface{}) { func (entry *Entry) Panic(args ...interface{}) { entry.Log(PanicLevel, args...) - panic(fmt.Sprint(args...)) } // Entry Printf family functions diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum index 1edc143bed02..694c18b84546 100644 --- a/vendor/github.com/sirupsen/logrus/go.sum +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -4,7 +4,5 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index ba7f237112bd..c96dc5636bf0 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -23,6 +23,9 @@ func (f FieldMap) resolve(key fieldKey) string { // JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. 
TimestampFormat string // DisableTimestamp allows disabling automatic timestamps in output @@ -118,7 +121,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { encoder.SetIndent("", " ") } if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) } return b.Bytes(), nil diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index dbf627c97541..337704457a28 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -12,7 +12,7 @@ import ( // LogFunction For big messages, it can be more efficient to pass a function // and only call it if the log level is actually enables rather than // generating the log message and then checking if the level is enabled -type LogFunction func()[]interface{} +type LogFunction func() []interface{} type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index cc4fe6e31776..04748b8515f1 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,4 +1,4 @@ -// +build linux aix +// +build linux aix zos // +build !js package logrus diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index 3c28b54cabae..be2c6efe5ed0 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -53,7 +53,10 @@ type TextFormatter struct { // the time passed since beginning of execution. FullTimestamp bool - // TimestampFormat to use for display when a full timestamp is printed + // TimestampFormat to use for display when a full timestamp is printed. 
+ // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. TimestampFormat string // The fields are sorted by default for a consistent output. For applications @@ -235,6 +238,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: levelColor = red + case InfoLevel: + levelColor = blue default: levelColor = blue } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go index eb3157f0b20e..e15b7bf6a7c7 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go @@ -57,7 +57,7 @@ loop: err = transform.ErrShortSrc break loop } - r = utf8.RuneError + r, size = utf8.RuneError, 1 goto write } size = 2 diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go index 1e74d1affd27..f41aedcfc8aa 100644 --- a/vendor/golang.org/x/text/internal/language/language.go +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -303,9 +303,17 @@ func (t Tag) Extensions() []string { // are of the allowed values defined for the Unicode locale extension ('u') in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. 
func (t Tag) TypeForKey(key string) string { - if start, end, _ := t.findTypeForKey(key); end != start { - return t.str[start:end] + if _, start, end, _ := t.findTypeForKey(key); end != start { + s := t.str[start:end] + if p := strings.IndexByte(s, '-'); p >= 0 { + s = s[:p] + } + return s } return "" } @@ -329,13 +337,13 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // Remove the setting if value is "". if value == "" { - start, end, _ := t.findTypeForKey(key) - if start != end { - // Remove key tag and leading '-'. - start -= 4 - + start, sep, end, _ := t.findTypeForKey(key) + if start != sep { // Remove a possible empty extension. - if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { + switch { + case t.str[start-2] != '-': // has previous elements. + case end == len(t.str), // end of string + end+2 < len(t.str) && t.str[end+2] == '-': // end of extension start -= 2 } if start == int(t.pVariant) && end == len(t.str) { @@ -381,14 +389,14 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { t.str = string(buf[:uStart+len(b)]) } else { s := t.str - start, end, hasExt := t.findTypeForKey(key) - if start == end { + start, sep, end, hasExt := t.findTypeForKey(key) + if start == sep { if hasExt { b = b[2:] } - t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) + t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) } else { - t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) + t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) } } return t, nil @@ -399,10 +407,10 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { // wasn't found. The hasExt return value reports whether an -u extension was present. // Note: the extensions are typically very small and are likely to contain // only one key-type pair. 
-func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { +func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { p := int(t.pExt) if len(key) != 2 || p == len(t.str) || p == 0 { - return p, p, false + return p, p, p, false } s := t.str @@ -410,10 +418,10 @@ func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { for p++; s[p] != 'u'; p++ { if s[p] > 'u' { p-- - return p, p, false + return p, p, p, false } if p = nextExtension(s, p); p == len(s) { - return len(s), len(s), false + return len(s), len(s), len(s), false } } // Proceed to the hyphen following the extension name. @@ -424,40 +432,28 @@ func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { // Iterate over keys until we get the end of a section. for { - // p points to the hyphen preceding the current token. - if p3 := p + 3; s[p3] == '-' { - // Found a key. - // Check whether we just processed the key that was requested. - if curKey == key { - return start, p, true - } - // Set to the next key and continue scanning type tokens. - curKey = s[p+1 : p3] - if curKey > key { - return p, p, true - } - // Start of the type token sequence. - start = p + 4 - // A type is at least 3 characters long. - p += 7 // 4 + 3 - } else { - // Attribute or type, which is at least 3 characters long. - p += 4 - } - // p points past the third character of a type or attribute. - max := p + 5 // maximum length of token plus hyphen. - if len(s) < max { - max = len(s) + end = p + for p++; p < len(s) && s[p] != '-'; p++ { } - for ; p < max && s[p] != '-'; p++ { + n := p - end - 1 + if n <= 2 && curKey == key { + if sep < end { + sep++ + } + return start, sep, end, true } - // Bail if we have exhausted all tokens or if the next token starts - // a new extension. 
- if p == len(s) || s[p+2] == '-' { - if curKey == key { - return start, p, true + switch n { + case 0, // invalid string + 1: // next extension + return end, end, end, true + case 2: + // next key + curKey = s[end+1 : p] + if curKey > key { + return end, end, end, true } - return p, p, true + start = end + sep = p } } } diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go index 2be83e1da542..c696fd0bd867 100644 --- a/vendor/golang.org/x/text/internal/language/parse.go +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -133,14 +133,15 @@ func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { s.start = oldStart if end := oldStart + newSize; end != oldEnd { diff := end - oldEnd - if end < cap(s.b) { - b := make([]byte, len(s.b)+diff) + var b []byte + if n := len(s.b) + diff; n > cap(s.b) { + b = make([]byte, n) copy(b, s.b[:oldStart]) - copy(b[end:], s.b[oldEnd:]) - s.b = b } else { - s.b = append(s.b[end:], s.b[oldEnd:]...) + b = s.b[:n] } + copy(b[end:], s.b[oldEnd:]) + s.b = b s.next = end + (s.next - s.end) s.end = end } @@ -482,7 +483,7 @@ func parseExtensions(scan *scanner) int { func parseExtension(scan *scanner) int { start, end := scan.start, scan.end switch scan.token[0] { - case 'u': + case 'u': // https://www.ietf.org/rfc/rfc6067.txt attrStart := end scan.scan() for last := []byte{}; len(scan.token) > 2; scan.scan() { @@ -502,27 +503,29 @@ func parseExtension(scan *scanner) int { last = scan.token end = scan.end } + // Scan key-type sequences. A key is of length 2 and may be followed + // by 0 or more "type" subtags from 3 to the maximum of 8 letters. 
var last, key []byte for attrEnd := end; len(scan.token) == 2; last = key { key = scan.token - keyEnd := scan.end - end = scan.acceptMinSize(3) + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end + } // TODO: check key value validity - if keyEnd == end || bytes.Compare(key, last) != 1 { + if bytes.Compare(key, last) != 1 || scan.err != nil { // We have an invalid key or the keys are not sorted. // Start scanning keys from scratch and reorder. p := attrEnd + 1 scan.next = p keys := [][]byte{} for scan.scan(); len(scan.token) == 2; { - keyStart, keyEnd := scan.start, scan.end - end = scan.acceptMinSize(3) - if keyEnd != end { - keys = append(keys, scan.b[keyStart:end]) - } else { - scan.setError(ErrSyntax) - end = keyStart + keyStart := scan.start + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end } + keys = append(keys, scan.b[keyStart:end]) } sort.Stable(bytesSort{keys, 2}) if n := len(keys); n > 0 { @@ -546,7 +549,7 @@ func parseExtension(scan *scanner) int { break } } - case 't': + case 't': // https://www.ietf.org/rfc/rfc6497.txt scan.scan() if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { _, end = parseTag(scan) diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go index 380f4c09f7f2..c7435583b5f2 100644 --- a/vendor/golang.org/x/text/language/go1_1.go +++ b/vendor/golang.org/x/text/language/go1_1.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.2 // +build !go1.2 package language diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go index 38268c57a373..77aaaa299eb1 100644 --- a/vendor/golang.org/x/text/language/go1_2.go +++ b/vendor/golang.org/x/text/language/go1_2.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.2 // +build go1.2 package language diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go index abfa17f66db1..289b3a36d524 100644 --- a/vendor/golang.org/x/text/language/language.go +++ b/vendor/golang.org/x/text/language/language.go @@ -412,6 +412,10 @@ func (t Tag) Extensions() []Extension { // are of the allowed values defined for the Unicode locale extension ('u') in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. // TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. 
func (t Tag) TypeForKey(key string) string { if !compact.Tag(t).MayHaveExtensions() { if key != "rg" && key != "va" { diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go index 87e58a02a089..96b57f610adf 100644 --- a/vendor/golang.org/x/text/language/tables.go +++ b/vendor/golang.org/x/text/language/tables.go @@ -47,7 +47,7 @@ const ( _Zzzz = 251 ) -var regionToGroups = []uint8{ // 357 elements +var regionToGroups = []uint8{ // 358 elements // Entry 0 - 3F 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, @@ -98,8 +98,8 @@ var regionToGroups = []uint8{ // 357 elements 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, -} // Size: 381 bytes + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 382 bytes var paradigmLocales = [][3]uint16{ // 3 elements 0: [3]uint16{0x139, 0x0, 0x7b}, @@ -295,4 +295,4 @@ var matchRegion = []regionIntelligibility{ // 15 elements 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5}, } // Size: 114 bytes -// Total table size 1471 bytes (1KiB); checksum: 4CB1CD46 +// Total table size 1472 bytes (1KiB); checksum: F86C669 diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index e4c62289f90d..8a7392c4a162 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build go1.10 // +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 02b9e1e9d4c2..bb0a920018c8 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go index e8edc54cc28d..fd057601bd91 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -12,15 +12,14 @@ // and without notice. package bidi // import "golang.org/x/text/unicode/bidi" -// TODO: -// The following functionality would not be hard to implement, but hinges on -// the definition of a Segmenter interface. For now this is up to the user. -// - Iterate over paragraphs -// - Segmenter to iterate over runs directly from a given text. -// Also: +// TODO // - Transformer for reordering? // - Transformer (validator, really) for Bidi Rule. +import ( + "bytes" +) + // This API tries to avoid dealing with embedding levels for now. Under the hood // these will be computed, but the question is to which extent the user should // know they exist. We should at some point allow the user to specify an @@ -49,7 +48,9 @@ const ( Neutral ) -type options struct{} +type options struct { + defaultDirection Direction +} // An Option is an option for Bidi processing. type Option func(*options) @@ -66,12 +67,62 @@ type Option func(*options) // DefaultDirection sets the default direction for a Paragraph. The direction is // overridden if the text contains directional characters. 
func DefaultDirection(d Direction) Option { - panic("unimplemented") + return func(opts *options) { + opts.defaultDirection = d + } } // A Paragraph holds a single Paragraph for Bidi processing. type Paragraph struct { - // buffers + p []byte + o Ordering + opts []Option + types []Class + pairTypes []bracketType + pairValues []rune + runes []rune + options options +} + +// Initialize the p.pairTypes, p.pairValues and p.types from the input previously +// set by p.SetBytes() or p.SetString(). Also limit the input up to (and including) a paragraph +// separator (bidi class B). +// +// The function p.Order() needs these values to be set, so this preparation could be postponed. +// But since the SetBytes and SetStrings functions return the length of the input up to the paragraph +// separator, the whole input needs to be processed anyway and should not be done twice. +// +// The function has the same return values as SetBytes() / SetString() +func (p *Paragraph) prepareInput() (n int, err error) { + p.runes = bytes.Runes(p.p) + bytecount := 0 + // clear slices from previous SetString or SetBytes + p.pairTypes = nil + p.pairValues = nil + p.types = nil + + for _, r := range p.runes { + props, i := LookupRune(r) + bytecount += i + cls := props.Class() + if cls == B { + return bytecount, nil + } + p.types = append(p.types, cls) + if props.IsOpeningBracket() { + p.pairTypes = append(p.pairTypes, bpOpen) + p.pairValues = append(p.pairValues, r) + } else if props.IsBracket() { + // this must be a closing bracket, + // since IsOpeningBracket is not true + p.pairTypes = append(p.pairTypes, bpClose) + p.pairValues = append(p.pairValues, r) + } else { + p.pairTypes = append(p.pairTypes, bpNone) + p.pairValues = append(p.pairValues, 0) + } + } + return bytecount, nil } // SetBytes configures p for the given paragraph text. It replaces text @@ -80,70 +131,150 @@ type Paragraph struct { // consumed from b including this separator. Error may be non-nil if options are // given. 
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = b + p.opts = opts + return p.prepareInput() } -// SetString configures p for the given paragraph text. It replaces text -// previously set by SetBytes or SetString. If b contains a paragraph separator +// SetString configures s for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If s contains a paragraph separator // it will only process the first paragraph and report the number of bytes -// consumed from b including this separator. Error may be non-nil if options are +// consumed from s including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = []byte(s) + p.opts = opts + return p.prepareInput() } // IsLeftToRight reports whether the principle direction of rendering for this // paragraphs is left-to-right. If this returns false, the principle direction // of rendering is right-to-left. func (p *Paragraph) IsLeftToRight() bool { - panic("unimplemented") + return p.Direction() == LeftToRight } // Direction returns the direction of the text of this paragraph. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (p *Paragraph) Direction() Direction { - panic("unimplemented") + return p.o.Direction() } +// TODO: what happens if the position is > len(input)? This should return an error. + // RunAt reports the Run at the given position of the input text. // // This method can be used for computing line breaks on paragraphs. 
func (p *Paragraph) RunAt(pos int) Run { - panic("unimplemented") + c := 0 + runNumber := 0 + for i, r := range p.o.runes { + c += len(r) + if pos < c { + runNumber = i + } + } + return p.o.Run(runNumber) +} + +func calculateOrdering(levels []level, runes []rune) Ordering { + var curDir Direction + + prevDir := Neutral + prevI := 0 + + o := Ordering{} + // lvl = 0,2,4,...: left to right + // lvl = 1,3,5,...: right to left + for i, lvl := range levels { + if lvl%2 == 0 { + curDir = LeftToRight + } else { + curDir = RightToLeft + } + if curDir != prevDir { + if i > 0 { + o.runes = append(o.runes, runes[prevI:i]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + } + prevI = i + prevDir = curDir + } + } + o.runes = append(o.runes, runes[prevI:]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + return o } // Order computes the visual ordering of all the runs in a Paragraph. func (p *Paragraph) Order() (Ordering, error) { - panic("unimplemented") + if len(p.types) == 0 { + return Ordering{}, nil + } + + for _, fn := range p.opts { + fn(&p.options) + } + lvl := level(-1) + if p.options.defaultDirection == RightToLeft { + lvl = 1 + } + para, err := newParagraph(p.types, p.pairTypes, p.pairValues, lvl) + if err != nil { + return Ordering{}, err + } + + levels := para.getLevels([]int{len(p.types)}) + + p.o = calculateOrdering(levels, p.runes) + return p.o, nil } // Line computes the visual ordering of runs for a single line starting and // ending at the given positions in the original text. 
func (p *Paragraph) Line(start, end int) (Ordering, error) { - panic("unimplemented") + lineTypes := p.types[start:end] + para, err := newParagraph(lineTypes, p.pairTypes[start:end], p.pairValues[start:end], -1) + if err != nil { + return Ordering{}, err + } + levels := para.getLevels([]int{len(lineTypes)}) + o := calculateOrdering(levels, p.runes[start:end]) + return o, nil } // An Ordering holds the computed visual order of runs of a Paragraph. Calling // SetBytes or SetString on the originating Paragraph invalidates an Ordering. // The methods of an Ordering should only be called by one goroutine at a time. -type Ordering struct{} +type Ordering struct { + runes [][]rune + directions []Direction + startpos []int +} // Direction reports the directionality of the runs. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (o *Ordering) Direction() Direction { - panic("unimplemented") + return o.directions[0] } // NumRuns returns the number of runs. func (o *Ordering) NumRuns() int { - panic("unimplemented") + return len(o.runes) } // Run returns the ith run within the ordering. func (o *Ordering) Run(i int) Run { - panic("unimplemented") + r := Run{ + runes: o.runes[i], + direction: o.directions[i], + startpos: o.startpos[i], + } + return r } // TODO: perhaps with options. @@ -155,16 +286,19 @@ func (o *Ordering) Run(i int) Run { // A Run is a continuous sequence of characters of a single direction. type Run struct { + runes []rune + direction Direction + startpos int } // String returns the text of the run in its original order. func (r *Run) String() string { - panic("unimplemented") + return string(r.runes) } // Bytes returns the text of the run in its original order. func (r *Run) Bytes() []byte { - panic("unimplemented") + return []byte(r.String()) } // TODO: methods for @@ -174,25 +308,52 @@ func (r *Run) Bytes() []byte { // Direction reports the direction of the run. 
func (r *Run) Direction() Direction { - panic("unimplemented") + return r.direction } -// Position of the Run within the text passed to SetBytes or SetString of the +// Pos returns the position of the Run within the text passed to SetBytes or SetString of the // originating Paragraph value. func (r *Run) Pos() (start, end int) { - panic("unimplemented") + return r.startpos, r.startpos + len(r.runes) - 1 } // AppendReverse reverses the order of characters of in, appends them to out, // and returns the result. Modifiers will still follow the runes they modify. // Brackets are replaced with their counterparts. func AppendReverse(out, in []byte) []byte { - panic("unimplemented") + ret := make([]byte, len(in)+len(out)) + copy(ret, out) + inRunes := bytes.Runes(in) + + for i, r := range inRunes { + prop, _ := LookupRune(r) + if prop.IsBracket() { + inRunes[i] = prop.reverseBracket(r) + } + } + + for i, j := 0, len(inRunes)-1; i < j; i, j = i+1, j-1 { + inRunes[i], inRunes[j] = inRunes[j], inRunes[i] + } + copy(ret[len(out):], string(inRunes)) + + return ret } // ReverseString reverses the order of characters in s and returns a new string. // Modifiers will still follow the runes they modify. Brackets are replaced with // their counterparts. 
func ReverseString(s string) string { - panic("unimplemented") + input := []rune(s) + li := len(input) + ret := make([]rune, li) + for i, r := range input { + prop, _ := LookupRune(r) + if prop.IsBracket() { + ret[li-i-1] = prop.reverseBracket(r) + } else { + ret[li-i-1] = r + } + } + return string(ret) } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 50deb6600a3c..e4c0811016c2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -4,7 +4,10 @@ package bidi -import "log" +import ( + "fmt" + "log" +) // This implementation is a port based on the reference implementation found at: // https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ @@ -97,13 +100,20 @@ type paragraph struct { // rune (suggested is the rune of the open bracket for opening and matching // close brackets, after normalization). The embedding levels are optional, but // may be supplied to encode embedding levels of styled text. -// -// TODO: return an error. 
-func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { - validateTypes(types) - validatePbTypes(pairTypes) - validatePbValues(pairValues, pairTypes) - validateParagraphEmbeddingLevel(levels) +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) (*paragraph, error) { + var err error + if err = validateTypes(types); err != nil { + return nil, err + } + if err = validatePbTypes(pairTypes); err != nil { + return nil, err + } + if err = validatePbValues(pairValues, pairTypes); err != nil { + return nil, err + } + if err = validateParagraphEmbeddingLevel(levels); err != nil { + return nil, err + } p := ¶graph{ initialTypes: append([]Class(nil), types...), @@ -115,7 +125,7 @@ func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, lev resultTypes: append([]Class(nil), types...), } p.run() - return p + return p, nil } func (p *paragraph) Len() int { return len(p.initialTypes) } @@ -1001,58 +1011,61 @@ func typeForLevel(level level) Class { return R } -// TODO: change validation to not panic - -func validateTypes(types []Class) { +func validateTypes(types []Class) error { if len(types) == 0 { - log.Panic("types is null") + return fmt.Errorf("types is null") } for i, t := range types[:len(types)-1] { if t == B { - log.Panicf("B type before end of paragraph at index: %d", i) + return fmt.Errorf("B type before end of paragraph at index: %d", i) } } + return nil } -func validateParagraphEmbeddingLevel(embeddingLevel level) { +func validateParagraphEmbeddingLevel(embeddingLevel level) error { if embeddingLevel != implicitLevel && embeddingLevel != 0 && embeddingLevel != 1 { - log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + return fmt.Errorf("illegal paragraph embedding level: %d", embeddingLevel) } + return nil } -func validateLineBreaks(linebreaks []int, textLength int) { +func validateLineBreaks(linebreaks []int, textLength int) error { prev := 0 
for i, next := range linebreaks { if next <= prev { - log.Panicf("bad linebreak: %d at index: %d", next, i) + return fmt.Errorf("bad linebreak: %d at index: %d", next, i) } prev = next } if prev != textLength { - log.Panicf("last linebreak was %d, want %d", prev, textLength) + return fmt.Errorf("last linebreak was %d, want %d", prev, textLength) } + return nil } -func validatePbTypes(pairTypes []bracketType) { +func validatePbTypes(pairTypes []bracketType) error { if len(pairTypes) == 0 { - log.Panic("pairTypes is null") + return fmt.Errorf("pairTypes is null") } for i, pt := range pairTypes { switch pt { case bpNone, bpOpen, bpClose: default: - log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + return fmt.Errorf("illegal pairType value at %d: %v", i, pairTypes[i]) } } + return nil } -func validatePbValues(pairValues []rune, pairTypes []bracketType) { +func validatePbValues(pairValues []rune, pairTypes []bracketType) error { if pairValues == nil { - log.Panic("pairValues is null") + return fmt.Errorf("pairValues is null") } if len(pairTypes) != len(pairValues) { - log.Panic("pairTypes is different length from pairValues") + return fmt.Errorf("pairTypes is different length from pairValues") } + return nil } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d8c94e1bd1a6..42fa8d72cec0 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 16b11db53883..56a0e1ea2165 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 647f2d4279e6..baacf32b43c3 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index c937d0976feb..f248effae17b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index 0ca0193ebe2d..f517fdb202a5 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build !go1.10 // +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 26fbd55a1243..f5a0788277ff 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 2c58f09baa49..cb7239c4377d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 7e1ae096e5c0..11b27330017d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index 9ea1b421407d..96a130d30e9e 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.16 // +build go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 942906929135..0175eae50aa6 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index decb8e480939..186b1d4efac5 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package width diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 3c75e428fd0d..990f7622f175 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package width diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 543942b9e781..85296297e38c 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build go1.14 && !go1.16 // +build go1.14,!go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index 804264ca67d1..bac3f1aee341 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.16 // +build go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index 7069e26345b2..b3db84f6f9b6 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package width diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 9bf4e8c17633..07da5db3450e 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -24,6 +24,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -48,10 +49,11 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using -// options in UnmarshalOptions object. It will clear the message first before -// setting the fields. If it returns an error, the given message may be -// partially set. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. 
+// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -124,15 +126,6 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error { return d.unexpectedTokenError(tok) } - if err := d.unmarshalFields(m, skipTypeURL); err != nil { - return err - } - - return nil -} - -// unmarshalFields unmarshals the fields into the given protoreflect.Message. -func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -170,7 +163,7 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { // Only extension names are in [name] format. extName := pref.FullName(name[1 : len(name)-1]) - extType, err := d.findExtension(extName) + extType, err := d.opts.Resolver.FindExtensionByName(extName) if err != nil && err != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) } @@ -184,17 +177,7 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { // The name can either be the JSON name or the proto field name. fd = fieldDescs.ByJSONName(name) if fd == nil { - fd = fieldDescs.ByName(pref.Name(name)) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textual field name is the group message name. 
- gd := fieldDescs.ByName(pref.Name(strings.ToLower(name))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == pref.Name(name) { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != pref.Name(name) { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(name) } } if flags.ProtoLegacy { @@ -257,15 +240,6 @@ func (d decoder) unmarshalFields(m pref.Message, skipTypeURL bool) error { } } -// findExtension returns protoreflect.ExtensionType from the resolver if found. -func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - func isKnownValue(fd pref.FieldDescriptor) bool { md := fd.Message() return md != nil && md.FullName() == genid.Value_message_fullname diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 7d6193300819..ba971f07810c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -7,15 +7,17 @@ package protojson import ( "encoding/base64" "fmt" - "sort" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -131,7 +133,7 @@ func (o MarshalOptions) marshal(m proto.Message) 
([]byte, error) { } enc := encoder{internalEnc, o} - if err := enc.marshalMessage(m.ProtoReflect()); err != nil { + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { return nil, err } if o.AllowPartial { @@ -145,76 +147,94 @@ type encoder struct { opts MarshalOptions } -// marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message) error { - if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { - return marshal(e, m) - } +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} - e.StartObject() - defer e.EndObject() - if err := e.marshalFields(m); err != nil { - return err +func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) { + return } + m.FieldRanger.Range(f) +} - return nil +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. 
+type unpopulatedFieldRanger struct{ pref.Message } + +func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = pref.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) } -// marshalFields marshals the fields in the given protoreflect.Message. -func (e encoder) marshalFields(m pref.Message) error { - messageDesc := m.Descriptor() - if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m pref.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { return errors.New("no support for proto1 MessageSets") } - // Marshal out known fields. 
- fieldDescs := messageDesc.Fields() - for i := 0; i < fieldDescs.Len(); { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue // unpopulated oneofs are not affected by EmitUnpopulated - } - } else { - i++ - } + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } - val := m.Get(fd) - if !m.Has(fd) { - if !e.opts.EmitUnpopulated { - continue - } - isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { - // Use invalid value to emit null. - val = pref.Value{} - } - } + e.StartObject() + defer e.EndObject() + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool { name := fd.JSONName() if e.opts.UseProtoNames { - name = string(fd.Name()) - // Use type name for group field name. - if fd.Kind() == pref.GroupKind { - name = string(fd.Message().Name()) - } + name = fd.TextName() } - if err := e.WriteName(name); err != nil { - return err + + if err = e.WriteName(name); err != nil { + return false } - if err := e.marshalValue(val, fd); err != nil { - return err + if err = e.marshalValue(v, fd); err != nil { + return false } - } - - // Marshal out extensions. - if err := e.marshalExtensions(m); err != nil { - return err - } - return nil + return true + }) + return err } // marshalValue marshals the given protoreflect.Value. 
@@ -281,7 +301,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } case pref.MessageKind, pref.GroupKind: - if err := e.marshalMessage(val.Message()); err != nil { + if err := e.marshalMessage(val.Message(), ""); err != nil { return err } @@ -305,98 +325,20 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error { return nil } -type mapEntry struct { - key pref.MapKey - value pref.Value -} - // marshalMap marshals given protoreflect.Map. func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error { e.StartObject() defer e.EndObject() - // Get a sorted list based on keyType first. - entries := make([]mapEntry, 0, mmap.Len()) - mmap.Range(func(key pref.MapKey, val pref.Value) bool { - entries = append(entries, mapEntry{key: key, value: val}) - return true - }) - sortMap(fd.MapKey().Kind(), entries) - - // Write out sorted list. - for _, entry := range entries { - if err := e.WriteName(entry.key.String()); err != nil { - return err - } - if err := e.marshalSingular(entry.value, fd.MapValue()); err != nil { - return err - } - } - return nil -} - -// sortMap orders list based on value of key field for deterministic ordering. -func sortMap(keyKind pref.Kind, values []mapEntry) { - sort.Slice(values, func(i, j int) bool { - switch keyKind { - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, - pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return values[i].key.Int() < values[j].key.Int() - - case pref.Uint32Kind, pref.Fixed32Kind, - pref.Uint64Kind, pref.Fixed64Kind: - return values[i].key.Uint() < values[j].key.Uint() + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false } - return values[i].key.String() < values[j].key.String() - }) -} - -// marshalExtensions marshals extension fields. 
-func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false } - - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - - // Use [name] format for JSON field name. - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) return true }) - - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. - for _, entry := range entries { - // JSON field name is the proto field name enclosed in [], similar to - // textproto. This is consistent with Go v1 lib. C++ lib v3.7.0 does not - // marshal out extension fields. - if err := e.WriteName("[" + entry.key + "]"); err != nil { - return err - } - if err := e.marshalValue(entry.value, entry.desc); err != nil { - return err - } - } - return nil + return err } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index def7377c78b5..72924a9050cf 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -7,6 +7,7 @@ package protojson import ( "bytes" "fmt" + "math" "strconv" "strings" "time" @@ -106,13 +107,11 @@ func (e encoder) marshalAny(m pref.Message) error { fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) fdValue := fds.ByNumber(genid.Any_Value_field_number) - // Start writing the JSON object. 
- e.StartObject() - defer e.EndObject() - if !m.Has(fdType) { if !m.Has(fdValue) { // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() return nil } else { // Return error if type_url field is not set, but value is set. @@ -123,14 +122,8 @@ func (e encoder) marshalAny(m pref.Message) error { typeVal := m.Get(fdType) valueVal := m.Get(fdValue) - // Marshal out @type field. - typeURL := typeVal.String() - e.WriteName("@type") - if err := e.WriteString(typeURL); err != nil { - return err - } - // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() emt, err := e.opts.Resolver.FindMessageByURL(typeURL) if err != nil { return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) @@ -149,12 +142,21 @@ func (e encoder) marshalAny(m pref.Message) error { // with corresponding custom JSON encoding of the embedded message as a // field. if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + e.WriteName("value") return marshal(e, em) } // Else, marshal out the embedded message's fields in this Any object. 
- if err := e.marshalFields(em); err != nil { + if err := e.marshalMessage(em, typeURL); err != nil { return err } @@ -494,6 +496,11 @@ func (e encoder) marshalKnownValue(m pref.Message) error { if fd == nil { return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } return e.marshalSingular(m.Get(fd), fd) } @@ -604,14 +611,11 @@ func (e encoder) marshalDuration(m pref.Message) error { } // Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision, followed by the suffix "s". - f := "%d.%09d" - if nanos < 0 { - nanos = -nanos - if secs == 0 { - f = "-%d.%09d" - } + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos } - x := fmt.Sprintf(f, secs, nanos) + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, ".000") diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index cab95a427356..8fb1d9e086a2 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "strings" "unicode/utf8" "google.golang.org/protobuf/internal/encoding/messageset" @@ -23,6 +22,7 @@ import ( ) // Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). 
func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) } @@ -51,8 +51,9 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message using options in -// UnmarshalOptions object. +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { return o.unmarshal(b, m) } @@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch tok.NameKind() { case text.IdentName: name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByName(name) - if fd == nil { - // The proto name of a group field is in all lowercase, - // while the textproto field name is the group message name. - gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { - fd = gd - } - } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { - fd = nil // reset since field name is actually the message name - } + fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true @@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { return nil } -// findExtension returns protoreflect.ExtensionType from the Resolver if found. 
-func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { - xt, err := d.opts.Resolver.FindExtensionByName(xtName) - if err == nil { - return xt, nil - } - return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) -} - // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 0877d71c5199..8d5304dc5b32 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -6,7 +6,6 @@ package prototext import ( "fmt" - "sort" "strconv" "unicode/utf8" @@ -16,10 +15,11 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -169,35 +169,15 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { // If unable to expand, continue on to marshal Any as a regular message. } - // Marshal known fields. - fieldDescs := messageDesc.Fields() - size := fieldDescs.Len() - for i := 0; i < size; { - fd := fieldDescs.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - - if fd == nil || !m.Has(fd) { - continue - } - - name := fd.Name() - // Use type name for group field name. 
- if fd.Kind() == pref.GroupKind { - name = fd.Message().Name() - } - val := m.Get(fd) - if err := e.marshalField(string(name), val, fd); err != nil { - return err + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false } - } - - // Marshal extensions. - if err := e.marshalExtensions(m); err != nil { + return true + }) + if err != nil { return err } @@ -290,7 +270,7 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto // marshalMap marshals the given protoreflect.Map as multiple name-value fields. func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { var err error - mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) return err } -// marshalExtensions marshals extension fields. -func (e encoder) marshalExtensions(m pref.Message) error { - type entry struct { - key string - value pref.Value - desc pref.FieldDescriptor - } - - // Get a sorted list based on field key first. - var entries []entry - m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - if !fd.IsExtension() { - return true - } - // For MessageSet extensions, the name used is the parent message. - name := fd.FullName() - if messageset.IsMessageSetExtension(fd) { - name = name.Parent() - } - entries = append(entries, entry{ - key: string(name), - value: v, - desc: fd, - }) - return true - }) - // Sort extensions lexicographically. - sort.Slice(entries, func(i, j int) bool { - return entries[i].key < entries[j].key - }) - - // Write out sorted list. 
- for _, entry := range entries { - // Extension field name is the proto field name enclosed in []. - name := "[" + entry.key + "]" - if err := e.marshalField(name, entry.value, entry.desc); err != nil { - return err - } - } - return nil -} - // marshalUnknown parses the given []byte and marshals fields out. // This function assumes proper encoding in the given []byte. func (e encoder) marshalUnknown(b []byte) { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index e7af0fe0de2f..360c63329d4d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { name = "FileImports" case pref.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() } start, end = name+"{", "}" } diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go index a904dd1f91a2..49c8676d484a 100644 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -26,6 +26,14 @@ func Bool() bool { return randSeed%2 == 1 } +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + // randSeed is a best-effort at an approximate hash of the Go binary. 
var randSeed = binaryHash() diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go index 2eb7023b2fcf..50578d6593c1 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -94,7 +94,7 @@ func (t Token) Pos() int { return t.pos } -// Name returns the object name if token is Name, else it will return an error. +// Name returns the object name if token is Name, else it panics. func (t Token) Name() string { if t.kind == Name { return t.str @@ -154,8 +154,7 @@ func (t Token) Int(bitSize int) (int64, bool) { return n, true } -// Uint returns the signed integer number if token is Number, else it will -// return an error. +// Uint returns the signed integer number if token is Number. // // The given bitSize specifies the unsigned integer type that the result must // fit into. It returns false if the number is not an unsigned integer value diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index b1eeea507978..c1866f3c1a78 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -11,10 +11,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" ) -// The MessageSet wire format is equivalent to a message defiend as follows, +// The MessageSet wire format is equivalent to a message defined as follows, // where each Item defines an extension field with a field number of 'type_id' // and content of 'message'. MessageSet extensions must be non-repeated message // fields. 
@@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool { return ok && xmd.IsMessageSet() } -// IsMessageSetExtension reports this field extends a MessageSet. +// IsMessageSetExtension reports this field properly extends a MessageSet. func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - if fd.Name() != ExtensionName { + switch { + case fd.Name() != ExtensionName: return false - } - if fd.FullName().Parent() != fd.Message().FullName() { + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): return false } - return IsMessageSet(fd.ContainingMessage()) -} - -// FindMessageSetExtension locates a MessageSet extension field by name. -// In text and JSON formats, the extension name used is the message itself. -// The extension field name is derived by appending ExtensionName. -func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { - name := s.Append(ExtensionName) - xt, err := r.FindExtensionByName(name) - if err != nil { - if err == preg.NotFound { - return nil, err - } - return nil, errors.Wrap(err, "%q", name) - } - if !IsMessageSetExtension(xt.TypeDescriptor()) { - return nil, preg.NotFound - } - return xt, nil + return true } // SizeField returns the size of a MessageSet item field containing an extension diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 16c02d7b627b..38f1931c6fd1 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.JSONName.Init(jsonName) + f.L1.StringName.InitJSON(jsonName) } case s == 
"packed": f.L1.HasPacked = true diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index c4ba1c598fb1..aa66bdd06a3a 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -32,7 +32,6 @@ type Encoder struct { encoderState indent string - newline string // set to "\n" if len(indent) > 0 delims [2]byte outputASCII bool } @@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro return nil, errors.New("indent may only be composed of space and tab characters") } e.indent = indent - e.newline = "\n" } switch delims { case [2]byte{0, 0}: @@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // are used to represent both the proto string and bytes type. r = rune(in[0]) fallthrough - case r < ' ' || r == '"' || r == '\\': + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: out = append(out, '\\') switch r { case '"', '\\': @@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { out = strconv.AppendUint(out, uint64(r), 16) } in = in[n:] - case outputASCII && r >= utf8.RuneSelf: + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): out = append(out, '\\') if r <= math.MaxUint16 { out = append(out, 'u') @@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte { // escaping. If no characters need escaping, this returns the input length. 
func indexNeedEscapeInString(s string) int { for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { return i } } diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go deleted file mode 100644 index 517c4e2a041a..000000000000 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fieldsort defines an ordering of fields. -// -// The ordering defined by this package matches the historic behavior of the proto -// package, placing extensions first and oneofs last. -// -// There is no guarantee about stability of the wire encoding, and users should not -// depend on the order defined in this package as it is subject to change without -// notice. -package fieldsort - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Less returns true if field a comes before field j in ordered wire marshal output. 
-func Less(a, b protoreflect.FieldDescriptor) bool { - ea := a.IsExtension() - eb := b.IsExtension() - oa := a.ContainingOneof() - ob := b.ContainingOneof() - switch { - case ea != eb: - return ea - case oa != nil && ob != nil: - if oa == ob { - return a.Number() < b.Number() - } - return oa.Index() < ob.Index() - case oa != nil && !oa.IsSynthetic(): - return false - case ob != nil && !ob.IsSynthetic(): - return true - default: - return a.Number() < b.Number() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index d02d770c9848..b293b6947361 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. // Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. package filedesc import ( diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 9385126fba64..98ab142aeee6 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" @@ -99,15 +100,6 @@ func (fd *File) lazyInitOnce() { fd.mu.Unlock() } -// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code -// to be able to retrieve the raw descriptor. 
-// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (fd *File) ProtoLegacyRawDesc() []byte { - return fd.builder.RawDescriptor -} - // GoPackagePath is a pseudo-internal API for determining the Go package path // that this file descriptor is declared in. // @@ -207,7 +199,7 @@ type ( Number pref.FieldNumber Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers Kind pref.Kind - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions HasPacked bool // promoted from google.protobuf.FieldOptions @@ -277,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage { func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } -func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } @@ -373,7 +366,7 @@ type ( } ExtensionL2 struct { Options func() pref.ProtoMessage - JSONName jsonName + StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue @@ -391,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage { func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } func (xd *Extension) Cardinality() 
pref.Cardinality { return xd.L1.Cardinality } func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional @@ -506,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syn func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} -type jsonName struct { - has bool - once sync.Once - name string +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string } -// Init initializes the name. It is exported for use by other internal packages. -func (js *jsonName) Init(s string) { - js.has = true - js.name = s +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name } -func (js *jsonName) get(fd pref.FieldDescriptor) string { - if !js.has { - js.once.Do(func() { - js.name = strs.JSONCamelCase(string(fd.Name())) - }) - } - return js.name +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. 
+ var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s } +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e672233e77ea..198451e3ec94 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -451,7 +451,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_Name_field_number: fd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_JsonName_field_number: - fd.L1.JSONName.Init(sb.MakeString(v)) + fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: @@ -551,7 +551,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] switch num { case genid.FieldDescriptorProto_JsonName_field_number: - xd.L2.JSONName.Init(sb.MakeString(v)) + 
xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index c876cd34d70a..aa294fff99a8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -6,9 +6,12 @@ package filedesc import ( "fmt" + "math" "sort" "sync" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/descfmt" "google.golang.org/protobuf/internal/errors" @@ -245,6 +248,7 @@ type OneofFields struct { once sync.Once byName map[pref.Name]pref.FieldDescriptor // protected by once byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once } @@ -252,6 +256,7 @@ func (p *OneofFields) Len() int { return func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} @@ -261,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields { if len(p.List) > 0 { p.byName = make(map[pref.Name]pref.FieldDescriptor, 
len(p.List)) p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f p.byNum[f.Number()] = f } } @@ -274,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields { } type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, 
int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } } +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } + // Update the next index for all locations. + p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. 
+type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 6a8825e8027b..30db19fdc75a 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -142,6 +142,7 @@ type Fields struct { once sync.Once byName map[protoreflect.Name]*Field // protected by once byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once byNum map[protoreflect.FieldNumber]*Field // protected by once } @@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { } return nil } +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { if d := p.lazyInit().byNum[n]; d != nil { return d @@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields { if len(p.List) > 0 { p.byName = make(map[protoreflect.Name]*Field, len(p.List)) p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) for i := range p.List { d := &p.List[i] @@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byJSON[d.JSONName()]; !ok { p.byJSON[d.JSONName()] = d } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } if 
_, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index b5974528db65..abee5f30e9fd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -167,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), "") + return legacyLoadMessageType(reflect.TypeOf(m), "") } // MessageStringOf returns the message value as a string, diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index c00744d385cf..cb4b482d166f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -10,6 +10,7 @@ import ( "sync" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" pref "google.golang.org/protobuf/reflect/protoreflect" preg "google.golang.org/protobuf/reflect/protoregistry" @@ -20,6 +21,7 @@ type errInvalidUTF8 struct{} func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. 
// @@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if p.Elem().IsNil() { p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) @@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: v, @@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ Buf: b, @@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } m := reflect.New(f.mi.GoReflectType.Elem()).Interface() mp := pointerOfIface(m) @@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, 
protowire.ParseError(n) + return pref.Value{}, out, errDecode } m := list.NewElement() o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ @@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } mp := reflect.New(goType.Elem()) o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index ff198d0a153b..1a509b63ebc1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -15,13 +15,13 @@ import ( ) // sizeBool returns the size of wire encoding a bool pointer as a Bool. -func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt } // consumeBool wire decodes a bool pointer as a Bool. 
-func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bool() = protowire.DecodeBool(v) out.n = n @@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{ // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. // The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bool() if v == false { return 0 @@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { // appendBoolNoZero wire encodes a bool pointer as a Bool. // The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bool() if v == false { return b, nil @@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{ // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. // It panics if the pointer is nil. -func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.BoolPtr() return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) } // appendBoolPtr wire encodes a *bool pointer as a Bool. // It panics if the pointer is nil. 
-func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.BoolPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v)) @@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeBoolPtr wire decodes a *bool pointer as a Bool. -func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.BoolPtr() if *vp == nil { @@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{ } // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) @@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBoolSlice encodes a []bool pointer as a repeated Bool. 
-func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. -func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeBool(v)) b = b[n:] @@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeBool(v)) out.n = n @@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{ } // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. 
-func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BoolSlice() if len(s) == 0 { return 0 @@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i } // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. -func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BoolSlice() if len(s) == 0 { return b, nil @@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{ } // sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) } // appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) return b, nil } // consumeBoolValue decodes a bool value as a Bool. 
-func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil @@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{ } // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. 
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) b = b[n:] @@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) out.n = n @@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{ } // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. -func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. 
-func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{ } // sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Enum())) } // appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Enum())) return b, nil } // consumeEnumValue decodes a value as a Enum. -func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil @@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{ } // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. 
-func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) } // appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeEnumSliceValue wire decodes a [] value as a repeated Enum. -func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) b = b[n:] @@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe v, n = 
protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) out.n = n @@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{ } // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. -func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp } // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{ } // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32 wire encodes a int32 pointer as a Int32. 
-func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt32 wire decodes a int32 pointer as a Int32. -func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{ // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. // The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt32NoZero wire encodes a int32 pointer as a Int32. // The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{ // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. 
// It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt32Ptr wire encodes a *int32 pointer as a Int32. // It panics if the pointer is nil. -func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{ } // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
-func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{ } // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. 
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{ } // sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) } // appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(int32(v.Int()))) return b, nil } // consumeInt32Value decodes a int32 value as a Int32. 
-func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{ } // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. 
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{ } // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. -func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. 
-func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{ } // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint32 wire decodes a int32 pointer as a Sint32. 
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) out.n = n @@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{ // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. // The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint32NoZero wire encodes a int32 pointer as a Sint32. // The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{ // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. // It panics if the pointer is nil. -func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int32Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) } // appendSint32Ptr wire encodes a *int32 pointer as a Sint32. // It panics if the pointer is nil. 
-func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) @@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{ } // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. -func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) @@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint32Slice encodes a []int32 pointer as a repeated Sint32. 
-func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. -func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) b = b[n:] @@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) out.n = n @@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{ } // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. 
-func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{ } // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) } // appendSint32Value encodes a int32 value as a Sint32. -func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) return b, nil } // consumeSint32Value decodes a int32 value as a Sint32. 
-func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil @@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{ } // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. 
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) b = b[n:] @@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) out.n = n @@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{ } // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. -func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. 
-func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{ } // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint32 wire decodes a uint32 pointer as a Uint32. -func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = uint32(v) out.n = n @@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{ // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. // The zero value is not encoded. 
-func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint32NoZero wire encodes a uint32 pointer as a Uint32. // The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{ // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint32Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. // It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. 
-func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{ } // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. -func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. 
-func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, uint32(v)) b = b[n:] @@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, uint32(v)) out.n = n @@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{ } // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. -func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. 
-func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{ } // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) } // appendUint32Value encodes a uint32 value as a Uint32. -func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) return b, nil } // consumeUint32Value decodes a uint32 value as a Uint32. 
-func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{ } // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{ } // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. -func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. 
-func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{ } // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeInt64 wire decodes a int64 pointer as a Int64. -func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{ // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. // The zero value is not encoded. 
-func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendInt64NoZero wire encodes a int64 pointer as a Int64. // The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{ // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. // It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(uint64(v)) } // appendInt64Ptr wire encodes a *int64 pointer as a Int64. // It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, uint64(v)) @@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeInt64Ptr wire decodes a *int64 pointer as a Int64. 
-func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{ } // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(uint64(v)) @@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendInt64Slice encodes a []int64 pointer as a repeated Int64. -func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. 
-func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{ } // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. -func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{ } // sizeInt64Value returns the size of wire encoding a int64 value as a Int64. 
-func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(uint64(v.Int())) } // appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, uint64(v.Int())) return b, nil } // consumeInt64Value decodes a int64 value as a Int64. -func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{ } // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. 
-func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. -func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb v, n = 
protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{ } // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. -func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{ } // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64 wire encodes a int64 pointer as a Sint64. 
-func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeSint64 wire decodes a int64 pointer as a Sint64. -func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = protowire.DecodeZigZag(v) out.n = n @@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{ // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. // The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendSint64NoZero wire encodes a int64 pointer as a Sint64. // The zero value is not encoded. 
-func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{ // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Int64Ptr() return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) } // appendSint64Ptr wire encodes a *int64 pointer as a Sint64. // It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) @@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. 
-func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{ } // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. -func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) @@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. 
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, protowire.DecodeZigZag(v)) b = b[n:] @@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, protowire.DecodeZigZag(v)) out.n = n @@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{ } // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. -func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. 
-func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{ } // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) } // appendSint64Value encodes a int64 value as a Sint64. -func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) return b, nil } // consumeSint64Value decodes a int64 value as a Sint64. 
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil @@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{ } // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
-func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) b = b[n:] @@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) out.n = n @@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{ } // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. -func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. 
-func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{ } // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() return f.tagsize + protowire.SizeVarint(v) } // appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeUint64 wire decodes a uint64 pointer as a Uint64. -func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{ // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. // The zero value is not encoded. 
-func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendUint64NoZero wire encodes a uint64 pointer as a Uint64. // The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{ // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.Uint64Ptr() return f.tagsize + protowire.SizeVarint(v) } // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. // It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendVarint(b, v) @@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. 
-func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return out, errUnknown } @@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{ } // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. -func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() for _, v := range s { size += f.tagsize + protowire.SizeVarint(v) @@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. -func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. 
-func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { var v uint64 @@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI v, n = protowire.ConsumeVarint(b) } if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{ } // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. -func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{ } // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. 
-func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeVarint(v.Uint()) } // appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendVarint(b, v.Uint()) return b, nil } // consumeUint64Value decodes a uint64 value as a Uint64. -func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.VarintType { return protoreflect.Value{}, out, errUnknown } @@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{ } // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. 
-func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. -func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { var v uint64 @@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num v, n = 
protowire.ConsumeVarint(b) } if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{ } // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. -func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{ } // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32 wire encodes a int32 pointer as a Sfixed32. 
-func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. -func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int32() = int32(v) out.n = n @@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{ // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. // The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int32() if v == 0 { return 0 @@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. // The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int32() if v == 0 { return b, nil @@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{ // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. 
-func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. // It panics if the pointer is nil. -func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, uint32(v)) @@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int32Ptr() if *vp == nil { @@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{ } // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. 
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int32(v)) b = b[n:] @@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int32(v)) out.n = n @@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{ } // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int32Slice() if len(s) == 0 { return 0 @@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. 
-func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int32Slice() if len(s) == 0 { return b, nil @@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{ } // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. -func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Int())) return b, nil } // consumeSfixed32Value decodes a int32 value as a Sfixed32. -func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt32(int32(v)), out, nil @@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{ } // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. 
-func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. -func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) b = b[n:] @@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } 
list.Append(protoreflect.ValueOfInt32(int32(v))) out.n = n @@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{ } // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. -func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{ } // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed32 wire decodes a uint32 pointer as a Fixed32. 
-func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint32() = v out.n = n @@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{ // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. // The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint32() if v == 0 { return 0 @@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. // The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint32() if v == 0 { return b, nil @@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{ // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. // It panics if the pointer is nil. 
-func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, v) @@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. -func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint32Ptr() if *vp == nil { @@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{ } // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. -func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. 
-func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{ } // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint32Slice() if len(s) == 0 { return 0 @@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint32Slice() if len(s) == 0 { return b, nil @@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{ } // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. 
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFixed32Value encodes a uint32 value as a Fixed32. -func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, uint32(v.Uint())) return b, nil } // consumeFixed32Value decodes a uint32 value as a Fixed32. -func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint32(uint32(v)), out, nil @@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{ } // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. 
-func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. -func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) b = b[n:] @@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) out.n = n @@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{ } // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. 
-func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. -func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{ } // sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeFloat wire decodes a float32 pointer as a Float. 
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float32() = math.Float32frombits(v) out.n = n @@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{ // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. // The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendFloatNoZero wire encodes a float32 pointer as a Float. // The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float32() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{ // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. // It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed32() } // appendFloatPtr wire encodes a *float32 pointer as a Float. // It panics if the pointer is nil. 
-func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float32Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed32(b, math.Float32bits(v)) @@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeFloatPtr wire decodes a *float32 pointer as a Float. -func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float32Ptr() if *vp == nil { @@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{ } // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() size = len(s) * (f.tagsize + protowire.SizeFixed32()) return size } // appendFloatSlice encodes a []float32 pointer as a repeated Float. -func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. 
-func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float32frombits(v)) b = b[n:] @@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float32frombits(v)) out.n = n @@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{ } // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float32Slice() if len(s) == 0 { return 0 @@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float32Slice() if len(s) == 0 { return b, nil @@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{ } // sizeFloatValue returns the size of wire encoding a float32 value as a Float. 
-func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed32() } // appendFloatValue encodes a float32 value as a Float. -func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) return b, nil } // consumeFloatValue decodes a float32 value as a Float. -func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed32Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil @@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{ } // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed32()) return size } // appendFloatSliceValue encodes a []float32 value as a repeated Float. 
-func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. -func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) b = b[n:] @@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) out.n = n @@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{ } // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. 
-func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO } // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. -func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{ } // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([ } // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
-func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Int64() = int64(v) out.n = n @@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{ // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. // The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Int64() if v == 0 { return 0 @@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. // The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Int64() if v == 0 { return b, nil @@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{ // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. // It panics if the pointer is nil. 
-func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Int64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, uint64(v)) @@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. -func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Int64Ptr() if *vp == nil { @@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{ } // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. -func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption } // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. 
-func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, int64(v)) b = b[n:] @@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, int64(v)) out.n = n @@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{ } // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Int64Slice() if len(s) == 0 { return 0 @@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si } // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Int64Slice() if len(s) == 0 { return b, nil @@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{ } // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. 
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendSfixed64Value encodes a int64 value as a Sfixed64. -func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, uint64(v.Int())) return b, nil } // consumeSfixed64Value decodes a int64 value as a Sfixed64. -func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfInt64(int64(v)), out, nil @@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{ } // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. 
-func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64 } // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. -func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) b = b[n:] @@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) out.n = n @@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{ } // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. 
-func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh } // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. -func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{ } // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([] } // consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Uint64() = v out.n = n @@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{ // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. // The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Uint64() if v == 0 { return 0 @@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. // The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Uint64() if v == 0 { return b, nil @@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{ // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. // It panics if the pointer is nil. 
-func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Uint64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, v) @@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. -func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Uint64Ptr() if *vp == nil { @@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{ } // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. -func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions } // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. 
-func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, v) b = b[n:] @@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, v) out.n = n @@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{ } // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Uint64Slice() if len(s) == 0 { return 0 @@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz } // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Uint64Slice() if len(s) == 0 { return b, nil @@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{ } // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. 
-func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendFixed64Value encodes a uint64 value as a Fixed64. -func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, v.Uint()) return b, nil } // consumeFixed64Value decodes a uint64 value as a Fixed64. -func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfUint64(v), out, nil @@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{ } // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. 
-func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. -func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) b = b[n:] @@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfUint64(v)) out.n = n @@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{ } // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. 
-func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha } // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. -func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{ } // sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeDouble wire decodes a float64 pointer as a Double. 
-func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Float64() = math.Float64frombits(v) out.n = n @@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{ // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. // The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return 0 @@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendDoubleNoZero wire encodes a float64 pointer as a Double. // The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Float64() if v == 0 && !math.Signbit(float64(v)) { return b, nil @@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{ // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. // It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { return f.tagsize + protowire.SizeFixed64() } // appendDoublePtr wire encodes a *float64 pointer as a Double. // It panics if the pointer is nil. 
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.Float64Ptr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v)) @@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeDoublePtr wire decodes a *float64 pointer as a Double. -func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.Float64Ptr() if *vp == nil { @@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{ } // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() size = len(s) * (f.tagsize + protowire.SizeFixed64()) return size } // appendDoubleSlice encodes a []float64 pointer as a repeated Double. -func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. 
-func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } s = append(s, math.Float64frombits(v)) b = b[n:] @@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, math.Float64frombits(v)) out.n = n @@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{ } // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.Float64Slice() if len(s) == 0 { return 0 @@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size } // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. -func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.Float64Slice() if len(s) == 0 { return b, nil @@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{ } // sizeDoubleValue returns the size of wire encoding a float64 value as a Double. 
-func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeFixed64() } // appendDoubleValue encodes a float64 value as a Double. -func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) return b, nil } // consumeDoubleValue decodes a float64 value as a Double. -func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.Fixed64Type { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil @@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{ } // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() size = list.Len() * (tagsize + protowire.SizeFixed64()) return size } // appendDoubleSliceValue encodes a []float64 value as a repeated Double. 
-func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. -func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) b = b[n:] @@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) out.n = n @@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{ } // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. 
-func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal } // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. -func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() llen := list.Len() if llen == 0 { @@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{ } // sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() return f.tagsize + protowire.SizeBytes(len(v)) } // appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b } // consumeString wire decodes a string pointer as a String. 
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{ } // appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO } // consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *p.String() = v + *p.String() = string(v) out.n = n return out, nil } @@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{ // sizeStringNoZero returns the size of wire encoding a string pointer as a String. 
// The zero value is not encoded. -func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.String() if len(v) == 0 { return 0 @@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendStringNoZero wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{ // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. // The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.String() if len(v) == 0 { return b, nil @@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ // sizeStringPtr returns the size of wire encoding a *string pointer as a String. // It panics if the pointer is nil. -func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := **p.StringPtr() return f.tagsize + protowire.SizeBytes(len(v)) } // appendStringPtr wire encodes a *string pointer as a String. // It panics if the pointer is nil. 
-func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ( } // consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{ // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. // It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := **p.StringPtr() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendString(b, v) @@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh } // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. 
-func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } vp := p.StringPtr() if *vp == nil { *vp = new(string) } - **vp = v + **vp = string(v) out.n = n return out, nil } @@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{ } // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. -func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.StringSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) } // appendStringSlice encodes a []string pointer as a repeated String. -func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeStringSlice wire decodes a []string pointer as a repeated String. 
-func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.StringSlice() if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - *sp = append(*sp, v) + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{ } // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.StringSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. 
-func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return out, errInvalidUTF8{} } - *sp = append(*sp, v) + sp := p.StringSlice() + *sp = append(*sp, string(v)) out.n = n return out, nil } @@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{ } // sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.String())) } // appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) return b, nil } // consumeStringValue decodes a string value as a String. 
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfString(string(v)), out, nil @@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{ } // appendStringValueValidateUTF8 encodes a string value as a String. -func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendString(b, v.String()) if !utf8.ValidString(v.String()) { @@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6 } // consumeStringValueValidateUTF8 decodes a string value as a String. 
-func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } - if !utf8.ValidString(v) { + if !utf8.Valid(v) { return protoreflect.Value{}, out, errInvalidUTF8{} } out.n = n @@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{ } // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption } // appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, } // consumeStringSliceValue wire decodes a []string value as a repeated String. 
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } - v, n := protowire.ConsumeString(b) + v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfString(string(v))) out.n = n @@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{ } // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() return f.tagsize + protowire.SizeBytes(len(v)) } // appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by } // consumeBytes wire decodes a []byte pointer as a Bytes. 
-func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(emptyBuf[:], v...) out.n = n @@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{ } // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() b = protowire.AppendVarint(b, f.wiretag) b = protowire.AppendBytes(b, v) @@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp } // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{ // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. // The zero value is not encoded. 
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { v := *p.Bytes() if len(v) == 0 { return 0 @@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) // appendBytesNoZero wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) // consumeBytesNoZero wire decodes a []byte pointer as a Bytes. // The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *p.Bytes() = append(([]byte)(nil), v...) out.n = n @@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{ // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. // The zero value is not encoded. -func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { v := *p.Bytes() if len(v) == 0 { return b, nil @@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar } // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. 
-func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} @@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ } // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { s := *p.BytesSlice() for _, v := range s { size += f.tagsize + protowire.SizeBytes(len(v)) @@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { } // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) } // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. 
-func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BytesSlice() if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n @@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{ } // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { s := *p.BytesSlice() for _, v := range s { b = protowire.AppendVarint(b, f.wiretag) @@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars } // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !utf8.Valid(v) { return out, errInvalidUTF8{} } + sp := p.BytesSlice() *sp = append(*sp, append(emptyBuf[:], v...)) out.n = n return out, nil @@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ } // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. 
-func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { return tagsize + protowire.SizeBytes(len(v.Bytes())) } // appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { b = protowire.AppendVarint(b, wiretag) b = protowire.AppendBytes(b, v.Bytes()) return b, nil } // consumeBytesValue decodes a []byte value as a Bytes. -func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } out.n = n return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil @@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{ } // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions } // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. 
-func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { v := list.Get(i) @@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ } // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. -func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.BytesType { return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return protoreflect.Value{}, out, protowire.ParseError(n) + return protoreflect.Value{}, out, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) out.n = n diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 44885a761f6c..c1245fef4876 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -5,7 +5,6 @@ package impl import ( - "errors" "reflect" "sort" @@ -118,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -127,10 +126,10 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 
out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -157,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err @@ -175,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi } b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var ( key = mapi.keyZero @@ -184,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if num > protowire.MaxValidNumber { - return out, errors.New("invalid field number") + return out, errDecode } b = b[n:] err := errUnknown @@ -208,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi var v []byte v, n = protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } var o unmarshalOutput o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) @@ -221,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } } else if err != nil { return out, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 0e176d565d40..cd40527ff646 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/order" pref "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -27,6 +27,7 @@ type coderMessageInfo struct { coderFields map[protowire.Number]*coderFieldInfo sizecacheOffset offset unknownOffset offset + unknownPtrKind bool extensionOffset offset needsInitCheck bool isMessageSet bool @@ -47,9 +48,20 @@ type coderFieldInfo struct { } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = si.sizecacheOffset - mi.unknownOffset = si.unknownOffset - mi.extensionOffset = si.extensionOffset + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() @@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): @@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { sort.Slice(mi.orderedCoderFields, func(i, j int) bool { fi := fields.ByNumber(mi.orderedCoderFields[i].num) fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return fieldsort.Less(fi, fj) + return order.LegacyFieldOrder(fi, fj) }) } @@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.methods.Merge = mi.merge } } + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. +func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. 
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index cfb68e12fbf5..b7a23faf1e43 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } - unknown := *p.Apply(mi.unknownOffset).Bytes() - size += messageset.SizeUnknown(unknown) + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } return size } @@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions } } - unknown := *p.Apply(mi.unknownOffset).Bytes() - b, err := messageset.AppendUnknown(b, unknown) - if err != nil { - return b, err + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } } return b, nil @@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt *ep = make(map[int32]ExtensionField) } ext := *ep - unknown := p.Apply(mi.unknownOffset).Bytes() initialized := true err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) if err == errUnknown { - *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) - *unknown = append(*unknown, v...) + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) 
return nil } if !o.initialized { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 86f7dc3c9d77..90705e3aea74 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -30,7 +30,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } p.v.Elem().SetInt(int64(v)) out.n = n @@ -130,12 +130,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf if wtyp == protowire.BytesType { b, n := protowire.ConsumeBytes(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } for len(b) > 0 { v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) @@ -150,7 +150,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf } v, n := protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } rv := reflect.New(s.Type().Elem()).Elem() rv.SetInt(int64(v)) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 36a90dff3816..acd61bb50b2c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } if m, ok := v.Interface().(pref.ProtoMessage); ok { return 
pref.ValueOfMessage(m.ProtoReflect()) } @@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } if rv.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) } @@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } else { rv = reflect.ValueOf(m.Interface()) } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } return rv.Type() == c.goType } @@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { } func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } return c.PBValueOf(reflect.New(c.goType.Elem())) } func (c *messageConverter) Zero() pref.Value { return c.PBValueOf(reflect.Zero(c.goType)) } + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. +func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 85ba1d3b3345..949dc49a65b3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -17,6 +17,8 @@ import ( piface "google.golang.org/protobuf/runtime/protoiface" ) +var errDecode = errors.New("cannot parse invalid wire-format data") + type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags resolver interface { @@ -100,13 +102,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. 
var n int tag, n = protowire.ConsumeVarint(b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } b = b[n:] } var num protowire.Number if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errors.New("invalid field number") + return out, errDecode } else { num = protowire.Number(n) } @@ -114,7 +116,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if wtyp == protowire.EndGroupType { if num != groupTag { - return out, errors.New("mismatching end group marker") + return out, errDecode } groupTag = 0 break @@ -170,10 +172,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. } n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return out, protowire.ParseError(n) + return out, errDecode } if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := p.Apply(mi.unknownOffset).Bytes() + u := mi.mutableUnknownBytes(p) *u = protowire.AppendTag(*u, num, wtyp) *u = append(*u, b[:n]...) } @@ -181,7 +183,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. 
b = b[n:] } if groupTag != 0 { - return out, errors.New("missing end group marker") + return out, errDecode } if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { initialized = false @@ -221,7 +223,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, nil } case ValidationInvalid: - return out, errors.New("invalid wire format") + return out, errDecode case ValidationUnknown: } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 8c8a794c631d..845c67d6e7e5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int size += f.funcs.size(fptr, f, opts) } if mi.unknownOffset.IsValid() { - u := *p.Apply(mi.unknownOffset).Bytes() - size += len(u) + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } } if mi.sizecacheOffset.IsValid() { if size > math.MaxInt32 { @@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt } } if mi.unknownOffset.IsValid() && !mi.isMessageSet { - u := *p.Apply(mi.unknownOffset).Bytes() - b = append(b, u...) + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } } return b, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index c3d741c2f0c5..e3fb0b578586 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } - return legacyLoadMessageInfo(reflect.TypeOf(m), name) + return legacyLoadMessageType(reflect.TypeOf(m), name) } // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 61757ce50a78..49e723161c01 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber { retu func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } func (x placeholderExtension) HasPresence() bool { return false } func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 06c68e117026..3759b010c0cb 100644 --- 
a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -24,14 +24,24 @@ import ( // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. func legacyWrapMessage(v reflect.Value) pref.Message { - typ := v.Type() - if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} } - mt := legacyLoadMessageInfo(typ, "") + mt := legacyLoadMessageInfo(t, "") return mt.MessageOf(v.Interface()) } +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, @@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { GoReflectType: t, } + var hasMarshal, hasUnmarshal bool v := reflect.Zero(t).Interface() - if _, ok := v.(legacyMarshaler); ok { + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { mi.methods.Marshal = legacyMarshal // We have no way to tell whether the type's Marshal method @@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // calling Marshal methods when present. 
mi.methods.Flags |= piface.SupportMarshalDeterministic } - if _, ok := v.(legacyUnmarshaler); ok { + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal } - if _, ok := v.(legacyMerger); ok { + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { mi.methods.Merge = legacyMerge } @@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor // LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must be a *struct kind and not implement the v2 API already. +// which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { @@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr // If the Go type has no fields, then this might be a proto3 empty message // from before the size cache was added. If there are any fields, check to // see that at least one of them looks like something we generated. 
- if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) } } @@ -370,7 +383,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var legacyProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &piface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -401,18 +414,40 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. 
+ srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) if !ok { return piface.MergeOutput{} } - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return piface.MergeOutput{} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } return piface.MergeOutput{Flags: piface.MergeComplete} } @@ -422,6 +457,9 @@ type aberrantMessageType struct { } func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) Zero() pref.Message { @@ -443,6 +481,17 @@ type aberrantMessage struct { v reflect.Value } +// Reset implements the v1 proto.Message.Reset method. +func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + func (m aberrantMessage) ProtoReflect() pref.Message { return m } @@ -454,33 +503,40 @@ func (m aberrantMessage) Type() pref.MessageType { return aberrantMessageType{m.v.Type()} } func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } return aberrantMessage{reflect.Zero(m.v.Type())} } func (m aberrantMessage) Interface() pref.ProtoMessage { return m } func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return } func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - panic("invalid field descriptor") + return false } func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid field descriptor") + panic("invalid Message.Clear on " + 
string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid field descriptor") + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid field descriptor") + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid oneof descriptor") + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } func (m aberrantMessage) GetUnknown() pref.RawFields { return nil @@ -489,13 +545,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { - // An invalid message is a read-only, empty message. Since we don't know anything - // about the alleged contents of this message, we can't say with confidence that - // it is invalid in this sense. Therefore, report it as valid. 
- return true + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false } func (m aberrantMessage) ProtoMethods() *piface.Methods { - return legacyProtoMethods + return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { return m.v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index cdc4267dfadf..c65bbc0446ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } if mi.unknownOffset.IsValid() { - du := dst.Apply(mi.unknownOffset).Bytes() - su := src.Apply(mi.unknownOffset).Bytes() - if len(*su) > 0 { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) *du = append(*du, *su...) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index c026a98180d8..a104e28e858f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = []byte + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte ExtensionFields = map[int32]ExtensionField ) var ( sizecacheType = reflect.TypeOf(SizeCache(0)) weakFieldsType = 
reflect.TypeOf(WeakFields(nil)) - unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) ) type structInfo struct { sizecacheOffset offset + sizecacheType reflect.Type weakOffset offset + weakType reflect.Type unknownOffset offset + unknownType reflect.Type extensionOffset offset + extensionType reflect.Type fieldsByNumber map[pref.FieldNumber]reflect.StructField oneofsByName map[pref.Name]reflect.StructField @@ -151,18 +159,22 @@ fieldLoop: case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: - if f.Type == unknownFieldsType { + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type } default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { @@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message { func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) } -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi 
*MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 0f4b8db760aa..9488b7261313 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" + "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" pref "google.golang.org/protobuf/reflect/protoreflect" ) @@ -16,6 +17,11 @@ type reflectMessageInfo struct { fields map[pref.FieldNumber]*fieldInfo oneofs map[pref.Name]*oneofInfo + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. 
+ fieldTypes map[pref.FieldNumber]interface{} + // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) // It provides faster access to the fieldInfo, but may be incomplete. @@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { mi.makeKnownFieldsFunc(si) mi.makeUnknownFieldsFunc(t, si) mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) } // makeKnownFieldsFunc generates functions for operations that can be performed @@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); i++ { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } var fi fieldInfo switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) case fd.IsWeak(): fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: fi = fieldInfoForScalar(fd, fs, mi.Exporter) @@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { i++ } } + + // Introduce instability to iteration order, but keep it deterministic. 
+ if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } } func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - mi.getUnknown = func(pointer) pref.RawFields { return nil } - mi.setUnknown = func(pointer, pref.RawFields) { return } - if si.unknownOffset.IsValid() { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. mi.getUnknown = func(p pointer) pref.RawFields { if p.IsNil() { return nil } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - return pref.RawFields(*rv.Interface().(*[]byte)) + return *p.Apply(mi.unknownOffset).Bytes() } mi.setUnknown = func(p pointer, b pref.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } - rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) - *rv.Interface().(*[]byte) = []byte(b) + *p.Apply(mi.unknownOffset).Bytes() = b } - } else { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+ mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: mi.getUnknown = func(pointer) pref.RawFields { return nil } @@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { } } } +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } 
+ } +} type extensionMap map[int32]ExtensionField @@ -306,7 +397,6 @@ var ( // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - // TODO: Switch the input to be an opaque Pointer. if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { func (m *messageReflectWrapper) pointer() pointer { return m.p } func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} func (m *messageIfaceWrapper) ProtoReflect() pref.Message { return (*messageReflectWrapper)(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 23124a86e40c..343cf872197f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -28,6 +28,39 @@ type fieldInfo struct { newField func() pref.Value } +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { @@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv.Set(reflect.New(ot)) } rv = rv.Elem().Elem().Field(0) - if rv.IsNil() { + if rv.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) @@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. 
+ nullable = false } if ft.Kind() == reflect.Ptr { ft = ft.Elem() @@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo return false } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } return !rv.IsNil() }, clear: func(p pointer) { @@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo set: func(p pointer, v pref.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, mutable: func(p pointer) pref.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) @@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } return oi } + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. 
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 67b4ede6705f..9e3ed821efb3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -121,6 +121,7 @@ func (p pointer) String() *string { return p.v.Interface().(*string) } func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } func (p pointer) Extensions() 
*map[int32]ExtensionField { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 088aa85d483d..9ecf23a85bb7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -109,6 +109,7 @@ func (p pointer) String() *string { return (*string)(p.p) func (p pointer) StringPtr() **string { return (**string)(p.p) } func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go deleted file mode 100644 index a3de1cf32411..000000000000 --- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mapsort provides sorted access to maps. -package mapsort - -import ( - "sort" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Range iterates over every map entry in sorted key order, -// calling f for each key and value encountered. 
-func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - var keys []protoreflect.MapKey - mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { - keys = append(keys, key) - return true - }) - sort.Slice(keys, func(i, j int) bool { - switch keyKind { - case protoreflect.BoolKind: - return !keys[i].Bool() && keys[j].Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return keys[i].Int() < keys[j].Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, - protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return keys[i].Uint() < keys[j].Uint() - case protoreflect.StringKind: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keyKind.String()) - } - }) - for _, key := range keys { - if !f(key, mapv.Get(key)) { - break - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000000..2a24953f6a47 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. 
+ LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. 
+ GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 000000000000..c8090e0c547f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called everytime a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. 
+ fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called everytime a map entry is visited. + VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 72cf770b427f..5879131da788 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 25 + Minor = 26 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 42fc5195e87e..49f9b8c88cfd 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -45,12 +45,14 @@ type UnmarshalOptions struct { } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { _, err := o.unmarshal(b, m.ProtoReflect()) return err @@ -116,10 +118,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) // Parse the tag (field number and wire type). num, wtyp, tagLen := protowire.ConsumeTag(b) if tagLen < 0 { - return protowire.ParseError(tagLen) + return errDecode } if num > protowire.MaxValidNumber { - return errors.New("invalid field number") + return errDecode } // Find the field descriptor for this field number. 
@@ -159,7 +161,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) } valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) if valLen < 0 { - return protowire.ParseError(valLen) + return errDecode } if !o.DiscardUnknown { m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) @@ -194,7 +196,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto } b, n = protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } var ( keyField = fd.MapKey() @@ -213,10 +215,10 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto for len(b) > 0 { num, wtyp, n := protowire.ConsumeTag(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if num > protowire.MaxValidNumber { - return 0, errors.New("invalid field number") + return 0, errDecode } b = b[n:] err = errUnknown @@ -246,7 +248,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto if err == errUnknown { n = protowire.ConsumeFieldValue(num, wtyp, b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } } else if err != nil { return 0, err @@ -272,3 +274,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto // to the unknown field set of a message. It is never returned from an exported // function. 
var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go index d6dc904dccf4..301eeb20f82f 100644 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil case protoreflect.EnumKind: @@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil case protoreflect.Int32Kind: @@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Sint32Kind: @@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil case protoreflect.Uint32Kind: @@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.Int64Kind: @@ -72,7 +72,7 @@ func 
(o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Sint64Kind: @@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil case protoreflect.Uint64Kind: @@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.Sfixed32Kind: @@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt32(int32(v)), n, nil case protoreflect.Fixed32Kind: @@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint32(uint32(v)), n, nil case protoreflect.FloatKind: @@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil case protoreflect.Sfixed64Kind: @@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, 
protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfInt64(int64(v)), n, nil case protoreflect.Fixed64Kind: @@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfUint64(v), n, nil case protoreflect.DoubleKind: @@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil case protoreflect.StringKind: @@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) @@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil case protoreflect.MessageKind: @@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil case protoreflect.GroupKind: @@ -183,7 +183,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return val, 0, protowire.ParseError(n) + return val, 0, errDecode } return protoreflect.ValueOfBytes(v), n, nil default: @@ -197,12 +197,12 @@ 
func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) @@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) return n, nil @@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) @@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) return n, nil @@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := 
protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) @@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) return n, nil @@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, 
protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) @@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) return n, nil @@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeVarint(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeVarint(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - 
return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt32(int32(v))) @@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt32(int32(v))) return n, nil @@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint32(uint32(v))) @@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint32(uint32(v))) return n, nil @@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed32(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) @@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed32(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) return n, nil @@ 
-472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfInt64(int64(v))) @@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfInt64(int64(v))) return n, nil @@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfUint64(v)) @@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfUint64(v)) return n, nil @@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot if wtyp == protowire.BytesType { buf, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } for len(buf) > 0 { v, n := protowire.ConsumeFixed64(buf) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } buf = buf[n:] list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) @@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeFixed64(b) if n < 0 { 
- return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) return n, nil @@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } if strs.EnforceUTF8(fd) && !utf8.Valid(v) { return 0, errors.InvalidUTF8(string(fd.FullName())) @@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) return n, nil @@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeBytes(b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { @@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot } v, n := protowire.ConsumeGroup(fd.Number(), b) if n < 0 { - return 0, protowire.ParseError(n) + return 0, errDecode } m := list.NewElement() if err := o.unmarshalMessage(v, m.Message()); err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 7b47a1180e4d..d18239c23723 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,9 @@ package proto import ( - "sort" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/fieldsort" - "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" 
"google.golang.org/protobuf/runtime/protoiface" @@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] if messageset.IsMessageSet(m.Descriptor()) { return o.marshalMessageSet(b, m) } - // There are many choices for what order we visit fields in. The default one here - // is chosen for reasonable efficiency and simplicity given the protoreflect API. - // It is not deterministic, since Message.Range does not return fields in any - // defined order. - // - // When using deterministic serialization, we sort the known fields. + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalField(b, fd, v) return err == nil }) @@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([] return b, nil } -// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
-func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - if !o.Deterministic { - m.Range(f) - return - } - var fds []protoreflect.FieldDescriptor - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - fds = append(fds, fd) - return true - }) - sort.Slice(fds, func(a, b int) bool { - return fieldsort.Less(fds[a], fds[b]) - }) - for _, fd := range fds { - if !f(fd, m.Get(fd)) { - break - } - } -} - func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { switch { case fd.IsList(): @@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { keyf := fd.MapKey() valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } var err error - o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) var pos int b, pos = appendSpeculativeLength(b) @@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma return b, err } -func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { - if !o.Deterministic { - mapv.Range(f) - return - } - mapsort.Range(mapv, kind, f) -} - // When encoding length-prefixed fields, we speculatively set aside some number of bytes // for the length, encode the data, and then encode the length (shifting the data if necessary // to make room). 
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 10902bd851eb..4dba2b969972 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { // equalValue compares two singular values. func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.Message() != nil: - return equalMessage(x.Message(), y.Message()) - case fd.Kind() == pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() } diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 1d692c3a8b33..312d5d45c60f 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -9,6 +9,7 @@ import ( "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" 
"google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b if !flags.ProtoLegacy { return b, errors.New("no support for message_set_wire_format") } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } var err error - o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { b, err = o.marshalMessageSetField(b, fd, v) return err == nil }) diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index ca14b09c3419..1f0d183b102d 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -32,3 +32,12 @@ var Error error func init() { Error = errors.Error } + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000000..e4dfb1205063 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. 
+// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. 
+ // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. 
+ f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. 
+ f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. 
+ if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000000..37efda1afe9b --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), 
e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + 
protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r 
descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts 
!= nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? 
+ // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000000..cebb36cdade6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. 
+type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), 
x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. 
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. 
+// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." 
+ +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. +func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000000..9af1d56487a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. 
+ isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a 
legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return 
errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case 
(*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || 
kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000000..a7c5ceffc9b1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. 
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) 
+ if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. 
+func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. + if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. 
+func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. 
+func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. +func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 32ea3d98cd2a..121ba3a07bba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -4,6 +4,10 @@ package protoreflect +import ( + "strconv" +) + // SourceLocations is a list of source locations. type SourceLocations interface { // Len reports the number of source locations in the proto file. @@ -11,9 +15,20 @@ type SourceLocations interface { // Get returns the ith SourceLocation. 
It panics if out of bounds. Get(int) SourceLocation - doNotImplement + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation - // TODO: Add ByPath and ByDescriptor helper methods. + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement } // SourceLocation describes a source location and @@ -39,6 +54,10 @@ type SourceLocation struct { LeadingComments string // TrailingComments is the trailing attached comment for the declaration. TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int } // SourcePath identifies part of a file descriptor for a source location. @@ -48,5 +67,62 @@ type SourceLocation struct { // See google.protobuf.SourceCodeInfo.Location.path. type SourcePath []int32 -// TODO: Add SourcePath.String method to pretty-print the path. For example: -// ".message_type[6].nested_type[15].field[3]" +// Equal reports whether p1 equals p2. +func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. 
+// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000000..b03c1223c4a4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", 
(*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { 
+ case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, 
"options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = 
p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, 
"double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: 
+ b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5be14a725846..8e53c44a9188 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -232,11 +232,15 @@ type MessageDescriptor interface { type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } // MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. type MessageType interface { // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. New() Message // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. Zero() Message // Descriptor returns the message descriptor. @@ -245,6 +249,26 @@ type MessageType interface { Descriptor() MessageDescriptor } +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + // MessageDescriptors is a list of message declarations. 
type MessageDescriptors interface { // Len reports the number of messages. @@ -279,8 +303,15 @@ type FieldDescriptor interface { // JSONName reports the name used for JSON serialization. // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. JSONName() string + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + // HasPresence reports whether the field distinguishes between unpopulated // and default values. HasPresence() bool @@ -371,6 +402,9 @@ type FieldDescriptors interface { // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. // It returns nil if not found. ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor // ByNumber returns the FieldDescriptor for a field numbered n. // It returns nil if not found. ByNumber(n FieldNumber) FieldDescriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 5e5f9671646f..66dcbcd0d21c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -17,24 +17,49 @@ package protoregistry import ( "fmt" - "log" + "os" "strings" "sync" + "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" ) +// conflictPolicy configures the policy for handling registration conflicts. 
+// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + // ignoreConflict reports whether to ignore a registration conflict // given the descriptor being registered and the error. // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - log.Printf(""+ - "WARNING: %v\n"+ - "A future release will panic on registration conflicts. See:\n"+ - "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ - "\n", err) - return true + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } } var globalMutex sync.RWMutex @@ -96,38 +121,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { } path := file.Path() if prev := r.filesByPath[path]; prev != nil { - // TODO: Remove this after some soak-in period after moving these types. 
- var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - } - if r == GlobalFiles && prevPath != "" { - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) - } - + r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) err = amendErrorWithCaller(err, prev, file) if r == GlobalFiles && ignoreConflict(file, err) { @@ -178,6 +172,47 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { return nil } +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + // FindDescriptorByName looks up a descriptor by the full name. // // This returns (nil, NotFound) if not found. @@ -560,13 +595,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp return nil, NotFound } -// FindMessageByName looks up a message by its full name. -// E.g., "google.protobuf.Any" +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". // -// This return (nil, NotFound) if not found. +// This returns (nil, NotFound) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - // The full name by itself is a valid URL. 
- return r.FindMessageByURL(string(message)) + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound } // FindMessageByURL looks up a message by a URL identifier. @@ -574,6 +621,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // // This returns (nil, NotFound) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. if r == nil { return nil, NotFound } @@ -613,6 +662,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E if xt, _ := v.(protoreflect.ExtensionType); xt != nil { return xt, nil } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. 
+ if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) } return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 8242378569bd..f77239fc3b0b 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3557,16 +3557,15 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 
0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 82a473e2652f..8c10797b905e 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -166,10 +166,13 @@ import ( // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } // ... // foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// if err := any.UnmarshalTo(foo); err != nil { // ... 
// } // @@ -420,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go 
b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index f7a11099404b..a583ca2f6c77 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -303,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 6a8d872c085c..7f94443d2699 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -393,9 +393,12 @@ func numValidPaths(m proto.Message, paths []string) int { // Identify the next message to search within. md = fd.Message() // may be nil - if fd.IsMap() { - md = fd.MapValue().Message() // may be nil + + // Repeated fields are only allowed at the last postion. 
+ if fd.IsList() || fd.IsMap() { + md = nil } + return true }) { return i @@ -512,16 +515,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x8c, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x3b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 
0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c25e4bd7d0d7..c9ae92132aad 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -134,7 +134,16 @@ import ( // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // -// Example 5: Compute Timestamp from current time in Python. +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() @@ -306,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 2355adf428ea..895a8049e221 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -580,15 +580,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x7c, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x42, - 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go index 43ef7cb1456a..e511ad6f7fb9 100644 --- a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -411,6 +411,10 @@ type CodeGeneratorResponse_File struct { 
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` // The file contents. Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` } func (x *CodeGeneratorResponse_File) Reset() { @@ -466,6 +470,13 @@ func (x *CodeGeneratorResponse_File) GetContent() string { return "" } +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ @@ -496,7 +507,7 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, - 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbf, 0x02, 0x0a, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, @@ -507,23 +518,27 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, - 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0x5d, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x65, - 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, - 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x42, 0x67, - 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x42, 0x0c, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x5a, 0x39, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, - 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x3b, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x67, 0x6f, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, } var ( @@ -547,16 +562,18 @@ var 
file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo } var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_google_protobuf_compiler_plugin_proto_init() } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go index c056dd91ffa9..9bcbe50267bb 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go @@ -17,6 +17,8 @@ limitations under the License. 
package v1 import ( + "bytes" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/conversion" @@ -36,20 +38,29 @@ func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensio return nil } +var nullLiteral = []byte(`null`) + func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { raw, err := json.Marshal(*in) if err != nil { return err } - out.Raw = raw + if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { + // match JSON#UnmarshalJSON treatment of literal nulls + out.Raw = nil + } else { + out.Raw = raw + } return nil } func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { if in != nil { var i interface{} - if err := json.Unmarshal(in.Raw, &i); err != nil { - return err + if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } } *out = i } else { @@ -103,7 +114,7 @@ func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefi func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { if err := autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil { - return nil + return err } if len(out.Versions) == 0 { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go index ba7f286eb4e2..12cc2f6f2c92 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1 import ( + "bytes" "errors" "k8s.io/apimachinery/pkg/util/json" @@ -128,7 +129,7 @@ func (s JSON) MarshalJSON() ([]byte, error) { } func (s *JSON) UnmarshalJSON(data []byte) error { - if len(data) > 0 && string(data) != "null" { + if len(data) > 0 && !bytes.Equal(data, nullLiteral) { s.Raw = data } return nil diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go index e014ce62fd94..eed3fde63e15 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "bytes" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" @@ -36,20 +38,29 @@ func Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiext return nil } +var nullLiteral = []byte(`null`) + func Convert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { raw, err := json.Marshal(*in) if err != nil { return err } - out.Raw = raw + if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { + // match JSON#UnmarshalJSON treatment of literal nulls + out.Raw = nil + } else { + out.Raw = raw + } return nil } func Convert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { if in != nil { var i interface{} - if err := json.Unmarshal(in.Raw, &i); err != nil { - return err + if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { + if err := json.Unmarshal(in.Raw, &i); err != nil { + return err + } } *out = i } else { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go index 9a8fad3b7760..44941d82efff 100644 --- 
a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + "bytes" "errors" "k8s.io/apimachinery/pkg/util/json" @@ -128,7 +129,7 @@ func (s JSON) MarshalJSON() ([]byte, error) { } func (s *JSON) UnmarshalJSON(data []byte) error { - if len(data) > 0 && string(data) != "null" { + if len(data) > 0 && !bytes.Equal(data, nullLiteral) { s.Raw = data } return nil diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go index 20bbab065056..26d264fe833b 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go @@ -60,7 +60,10 @@ func (admit *managedFieldsValidatingAdmissionController) Admit(ctx context.Conte } objectMeta, err := meta.Accessor(a.GetObject()) if err != nil { - return err + // the object we are dealing with doesn't have object metadata defined + // in that case we don't have to keep track of the managedField + // just call the wrapped admission + return mutationInterface.Admit(ctx, a, o) } managedFieldsBeforeAdmission := objectMeta.GetManagedFields() if err := mutationInterface.Admit(ctx, a, o); err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go index 91e2e9691473..ad651a2b59db 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/lastappliedupdater.go @@ -25,6 +25,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const totalAnnotationSizeLimitB int64 = 256 * (1 << 10) // 256 kB + type lastAppliedUpdater struct { fieldManager Manager } @@ 
-78,8 +80,8 @@ func hasLastApplied(obj runtime.Object) bool { if annotations == nil { return false } - _, ok := annotations[corev1.LastAppliedConfigAnnotation] - return ok + lastApplied, ok := annotations[corev1.LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 } func setLastApplied(obj runtime.Object, value string) error { @@ -92,6 +94,9 @@ func setLastApplied(obj runtime.Object, value string) error { annotations = map[string]string{} } annotations[corev1.LastAppliedConfigAnnotation] = value + if isAnnotationsValid(annotations) != nil { + delete(annotations, corev1.LastAppliedConfigAnnotation) + } accessor.SetAnnotations(annotations) return nil } @@ -115,3 +120,14 @@ func buildLastApplied(obj runtime.Object) (string, error) { } return string(lastApplied), nil } + +func isAnnotationsValid(annotations map[string]string) error { + var totalSize int64 + for k, v := range annotations { + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(totalAnnotationSizeLimitB) { + return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, totalAnnotationSizeLimitB) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 09612984c90e..09ccb7b5c2ab 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -369,7 +369,7 @@ func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) { } scope := CleanScope(requestInfo) - reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), "", req) resource := requestInfo.Resource subresource := requestInfo.Subresource group := requestInfo.APIGroup @@ -392,7 +392,7 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf // InstrumentRouteFunc which is registered in 
installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), "", req) if requestInfo.IsResourceRequest { requestTerminationsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc() @@ -414,7 +414,7 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. - reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), "", req) if requestInfo.IsResourceRequest { g = longRunningRequestGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) @@ -433,7 +433,7 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour // InstrumentRouteFunc which is registered in installer.go with predefined // list of verbs (different than those translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. 
- reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) + reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), verb, req) dryRun := cleanDryRun(req.URL) elapsedSeconds := elapsed.Seconds() @@ -544,8 +544,15 @@ func canonicalVerb(verb string, scope string) string { } } -func cleanVerb(verb string, request *http.Request) string { +func cleanVerb(verb, suggestedVerb string, request *http.Request) string { reportedVerb := verb + // CanonicalVerb (being an input for this function) doesn't handle correctly the + // deprecated path pattern for watch of: + // GET /api/{version}/watch/{resource} + // We correct it manually based on the pass verb from the installer. + if suggestedVerb == "WATCH" || suggestedVerb == "WATCHLIST" { + reportedVerb = "WATCH" + } if verb == "LIST" { // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool if values := request.URL.Query()["watch"]; len(values) > 0 { diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go index e341b371df6f..e066d2a7fa7e 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go @@ -29,8 +29,8 @@ type decoratedWatcher struct { resultCh chan watch.Event } -func newDecoratedWatcher(w watch.Interface, decorator func(runtime.Object)) *decoratedWatcher { - ctx, cancel := context.WithCancel(context.Background()) +func newDecoratedWatcher(ctx context.Context, w watch.Interface, decorator func(runtime.Object)) *decoratedWatcher { + ctx, cancel := context.WithCancel(ctx) d := &decoratedWatcher{ w: w, decorator: decorator, @@ -41,14 +41,18 @@ func newDecoratedWatcher(w watch.Interface, decorator func(runtime.Object)) *dec return d } +// run decorates watch events from the underlying watcher until its result channel +// is closed or the passed 
in context is done. +// When run() returns, decoratedWatcher#resultCh is closed. func (d *decoratedWatcher) run(ctx context.Context) { var recv, send watch.Event var ok bool + defer close(d.resultCh) for { select { case recv, ok = <-d.w.ResultChan(): - // The underlying channel may be closed after timeout. if !ok { + // The underlying channel was closed, cancel our context d.cancel() return } @@ -61,20 +65,24 @@ func (d *decoratedWatcher) run(ctx context.Context) { } select { case d.resultCh <- send: - if send.Type == watch.Error { - d.cancel() - } + // propagated event successfully case <-ctx.Done(): + // context timed out or was cancelled, stop the underlying watcher + d.w.Stop() + return } case <-ctx.Done(): + // context timed out or was cancelled, stop the underlying watcher d.w.Stop() - close(d.resultCh) return } } } func (d *decoratedWatcher) Stop() { + // stop the underlying watcher + d.w.Stop() + // cancel our context d.cancel() } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index d40214f9db9b..0fe82ed0c974 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -1226,7 +1226,7 @@ func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate return nil, err } if e.Decorator != nil { - return newDecoratedWatcher(w, e.Decorator), nil + return newDecoratedWatcher(ctx, w, e.Decorator), nil } return w, nil } @@ -1239,7 +1239,7 @@ func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate return nil, err } if e.Decorator != nil { - return newDecoratedWatcher(w, e.Decorator), nil + return newDecoratedWatcher(ctx, w, e.Decorator), nil } return w, nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index 1bc66cee29cc..4f060f0317f7 100644 --- 
a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -464,6 +464,11 @@ func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { } } +// HasBeenReadySignal exposes a server's lifecycle signal which is signaled when the readyz endpoint succeeds for the first time. +func (c *Config) HasBeenReadySignal() <-chan struct{} { + return c.hasBeenReadyCh +} + // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { diff --git a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go index 655543a2513d..c0d49e3d177d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -45,6 +45,9 @@ func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTime Addr: s.Listener.Addr().String(), Handler: handler, MaxHeaderBytes: 1 << 20, + + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, // just shy of requestTimeoutUpperBound } if len(s.Name) > 0 { diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index 701540d64ada..f03e3be48735 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -134,7 +134,7 @@ func tunnelHTTPConnect(proxyConn net.Conn, proxyAddress, addr string) (net.Conn, type proxier interface { // proxy returns a connection to addr. 
- proxy(addr string) (net.Conn, error) + proxy(ctx context.Context, addr string) (net.Conn, error) } var _ proxier = &httpConnectProxier{} @@ -144,7 +144,7 @@ type httpConnectProxier struct { proxyAddress string } -func (t *httpConnectProxier) proxy(addr string) (net.Conn, error) { +func (t *httpConnectProxier) proxy(ctx context.Context, addr string) (net.Conn, error) { return tunnelHTTPConnect(t.conn, t.proxyAddress, addr) } @@ -154,8 +154,8 @@ type grpcProxier struct { tunnel client.Tunnel } -func (g *grpcProxier) proxy(addr string) (net.Conn, error) { - return g.tunnel.Dial("tcp", addr) +func (g *grpcProxier) proxy(ctx context.Context, addr string) (net.Conn, error) { + return g.tunnel.DialContext(ctx, "tcp", addr) } type proxyServerConnector interface { @@ -203,7 +203,8 @@ func (u *udsGRPCConnector) connect() (proxier, error) { return c, err }) - tunnel, err := client.CreateSingleUseGrpcTunnel(udsName, dialOption, grpc.WithInsecure()) + ctx := context.TODO() + tunnel, err := client.CreateSingleUseGrpcTunnel(ctx, udsName, dialOption, grpc.WithInsecure()) if err != nil { return nil, err } @@ -234,7 +235,7 @@ func (d *dialerCreator) createDialer() utilnet.DialFunc { egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageConnect) return nil, err } - conn, err := proxier.proxy(addr) + conn, err := proxier.proxy(ctx, addr) if err != nil { egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageProxy) return nil, err @@ -361,6 +362,16 @@ func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSe return cs, nil } +// NewEgressSelectorWithMap returns a EgressSelector with the supplied EgressType to DialFunc map. 
+func NewEgressSelectorWithMap(m map[EgressType]utilnet.DialFunc) *EgressSelector { + if m == nil { + m = make(map[EgressType]utilnet.DialFunc) + } + return &EgressSelector{ + egressToDialer: m, + } +} + // Lookup gets the dialer function for the network context. // This is configured for the Kubernetes API Server at startup. func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) { diff --git a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go index 38341eb03bd6..75e795aec4e2 100644 --- a/vendor/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/secure_serving.go @@ -159,6 +159,9 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur Handler: handler, MaxHeaderBytes: 1 << 20, TLSConfig: tlsConfig, + + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, // just shy of requestTimeoutUpperBound } // At least 99% of serialized resources in surveyed clusters were smaller than 256kb. @@ -166,7 +169,9 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur // and small enough to allow a per connection buffer of this size multiplied by `MaxConcurrentStreams`. 
const resourceBody99Percentile = 256 * 1024 - http2Options := &http2.Server{} + http2Options := &http2.Server{ + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + } // shrink the per-stream buffer and max framesize from the 1MB default while still accommodating most API POST requests in a single frame http2Options.MaxUploadBufferPerStream = resourceBody99Percentile diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 01008b7d03a6..1bd9bc8cbb67 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -230,12 +230,22 @@ func (s *store) conditionalDelete( } // It's possible we're working with stale data. + // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } @@ -246,12 +256,22 @@ func (s *store) conditionalDelete( } // It's possible we're working with stale data. 
+ // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } @@ -345,12 +365,22 @@ func (s *store) GuaranteedUpdate( } // It's possible we were working with stale data + // Remember the revision of the potentially stale data and the resulting update error + cachedRev := origState.rev + cachedUpdateErr := err + // Actually fetch origState, err = getCurrentState() if err != nil { return err } origStateIsCurrent = true + + // it turns out our cached data was not stale, return the error + if cachedRev == origState.rev { + return cachedUpdateErr + } + // Retry continue } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index 3ee3867456d5..a58c41804c62 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -67,6 +67,14 @@ const timeFmt = "2006-01-02T15:04:05.999" // undesired becomes completely unused, all the config objects are // read and processed as a whole. +// The funcs in this package follow the naming convention that the suffix +// "Locked" means the relevant mutex must be locked at the start of each +// call and will be locked upon return. For a configController, the +// suffix "ReadLocked" stipulates a read lock while just "Locked" +// stipulates a full lock. Absence of either suffix means that either +// (a) the lock must NOT be held at call time and will not be held +// upon return or (b) locking is irrelevant. + // StartFunction begins the process of handling a request. 
If the // request gets queued then this function uses the given hashValue as // the source of entropy as it shuffle-shards the request into a @@ -123,10 +131,22 @@ type configController struct { // requestWaitLimit comes from server configuration. requestWaitLimit time.Duration + // the most recent update attempts, ordered by increasing age. + // Consumer trims to keep only the last minute's worth of entries. + // The controller uses this to limit itself to at most six updates + // to a given FlowSchema in any minute. + // This may only be accessed from the one and only worker goroutine. + mostRecentUpdates []updateAttempt + // This must be locked while accessing flowSchemas or - // priorityLevelStates. It is the lock involved in - // LockingWriteMultiple. - lock sync.Mutex + // priorityLevelStates. A lock for writing is needed + // for writing to any of the following: + // - the flowSchemas field + // - the slice held in the flowSchemas field + // - the priorityLevelStates field + // - the map held in the priorityLevelStates field + // - any field of a priorityLevelState held in that map + lock sync.RWMutex // flowSchemas holds the flow schema objects, sorted by increasing // numerical (decreasing logical) matching precedence. Every @@ -137,13 +157,6 @@ type configController struct { // name to the state for that level. Every name referenced from a // member of `flowSchemas` has an entry here. priorityLevelStates map[string]*priorityLevelState - - // the most recent update attempts, ordered by increasing age. - // Consumer trims to keep only the last minute's worth of entries. - // The controller uses this to limit itself to at most six updates - // to a given FlowSchema in any minute. - // This may only be accessed from the one and only worker goroutine. 
- mostRecentUpdates []updateAttempt } type updateAttempt struct { @@ -276,8 +289,8 @@ func (cfgCtlr *configController) MaintainObservations(stopCh <-chan struct{}) { } func (cfgCtlr *configController) updateObservations() { - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() for _, plc := range cfgCtlr.priorityLevelStates { if plc.queues != nil { plc.queues.UpdateObservations() @@ -760,8 +773,8 @@ func (immediateRequest) Finish(execute func()) bool { // waiting in its queue, or `Time{}` if this did not happen. func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDigest, queueNoteFn fq.QueueNoteFn) (fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, isExempt bool, req fq.Request, startWaitingTime time.Time) { klog.V(7).Infof("startRequest(%#+v)", rd) - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() var selectedFlowSchema, catchAllFlowSchema *flowcontrol.FlowSchema for _, fs := range cfgCtlr.flowSchemas { if matchesFlowSchema(rd, fs) { @@ -806,7 +819,7 @@ func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDig klog.V(7).Infof("startRequest(%#+v) => fsName=%q, distMethod=%#+v, plName=%q, numQueues=%d", rd, selectedFlowSchema.Name, selectedFlowSchema.Spec.DistinguisherMethod, plName, numQueues) req, idle := plState.queues.StartRequest(ctx, hashValue, flowDistinguisher, selectedFlowSchema.Name, rd.RequestInfo, rd.User, queueNoteFn) if idle { - cfgCtlr.maybeReapLocked(plName, plState) + cfgCtlr.maybeReapReadLocked(plName, plState) } return selectedFlowSchema, plState.pl, false, req, startWaitingTime } @@ -815,8 +828,8 @@ func (cfgCtlr *configController) startRequest(ctx context.Context, rd RequestDig // priority level if it has no more use. Call this after getting a // clue that the given priority level is undesired and idle. 
func (cfgCtlr *configController) maybeReap(plName string) { - cfgCtlr.lock.Lock() - defer cfgCtlr.lock.Unlock() + cfgCtlr.lock.RLock() + defer cfgCtlr.lock.RUnlock() plState := cfgCtlr.priorityLevelStates[plName] if plState == nil { klog.V(7).Infof("plName=%s, plState==nil", plName) @@ -838,7 +851,7 @@ func (cfgCtlr *configController) maybeReap(plName string) { // it has no more use. Call this if both (1) plState.queues is // non-nil and reported being idle, and (2) cfgCtlr's lock has not // been released since then. -func (cfgCtlr *configController) maybeReapLocked(plName string, plState *priorityLevelState) { +func (cfgCtlr *configController) maybeReapReadLocked(plName string, plState *priorityLevelState) { if !(plState.quiescing && plState.numPending == 0) { return } diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index c732c782207b..90fa45ddbfeb 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -216,13 +216,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) } var ( diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go index c38ebc076045..6eee281bc66c 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -165,7 +165,7 @@ func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[str newExtension := (*in)[key] oldExtension := 
runtime.RawExtension{} if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { - return nil + return err } namedExtension := NamedExtension{key, oldExtension} *out = append(*out, namedExtension) diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index f1c483072853..05a97e74e50c 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -5,17 +5,17 @@ module k8s.io/cloud-provider go 1.16 require ( - github.com/google/go-cmp v0.5.4 + github.com/google/go-cmp v0.5.5 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 - k8s.io/api v0.21.0-rc.0 - k8s.io/apimachinery v0.21.0-rc.0 - k8s.io/apiserver v0.21.0-rc.0 - k8s.io/client-go v0.21.0-rc.0 + k8s.io/api v0.21.1 + k8s.io/apimachinery v0.21.1 + k8s.io/apiserver v0.21.1 + k8s.io/client-go v0.21.1 k8s.io/component-base v0.21.0-rc.0 k8s.io/controller-manager v0.0.0 - k8s.io/klog/v2 v2.8.0 + k8s.io/klog/v2 v2.9.0 k8s.io/utils v0.0.0-20210521133846-da695404a2bc ) @@ -25,9 +25,8 @@ replace ( github.com/imdario/mergo => github.com/imdario/mergo v0.3.5 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.0.9 github.com/onsi/ginkgo => github.com/openshift/ginkgo v4.7.0-origin.0+incompatible - github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e - github.com/openshift/api => github.com/openshift/api v0.0.0-20210422150128-d8a48168c81c - github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20210426120049-59b0e972bfb7 + github.com/openshift/api => github.com/openshift/api v0.0.0-20210713130143-be21c6cb1bea + github.com/openshift/build-machinery-go => github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359 github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 
github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 github.com/robfig/cron => github.com/robfig/cron v1.1.0 diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 3069fdbe605e..156e2dacdd84 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -76,6 +76,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -96,7 +97,9 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go 
v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= @@ -104,6 +107,7 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/container-storage-interface/spec v1.3.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -128,7 +132,9 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg 
v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -282,8 +288,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= @@ -300,8 +307,9 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 
h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -446,6 +454,7 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= @@ -480,11 +489,17 @@ github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec 
v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/openshift/api v0.0.0-20210422150128-d8a48168c81c/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= -github.com/openshift/apiserver-library-go v0.0.0-20210426120049-59b0e972bfb7/go.mod h1:nqn2IWld2A+Q9Lp/xGsbmUr2RyDCQixRU83yqAbymUM= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/openshift/api v0.0.0-20210713130143-be21c6cb1bea/go.mod h1:izBmoXbUu3z5kUa4FjZhvekTsyzIWiOoaIgJiZBBMQs= +github.com/openshift/apiserver-library-go v0.0.0-20211116020226-339bb71f9a26/go.mod h1:hmRcqTWiLRXXEnVLhCNoZBfmciZD2N2NrHTEzcRqhK8= github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535/go.mod h1:v5/AYttPCjfqMGC1Ed/vutuDpuXmgWc5O+W9nwQ7EtE= github.com/openshift/ginkgo v4.7.0-origin.0+incompatible h1:2qD1n/RAnycWMPjYS6MEAUzRmVoF0ql7ozk1ANv8dcM= @@ -492,7 +507,6 @@ github.com/openshift/ginkgo v4.7.0-origin.0+incompatible/go.mod h1:8METQ1gDhl0KW github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI= github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 h1:/6Xf107BJIzdfRe9xfuU4xnx7TUHQ7vzDMWiNYPmxfM= github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= -github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -552,8 +566,9 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -783,6 +798,7 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -792,6 +808,7 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -808,8 +825,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -933,8 +951,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -989,11 +1009,12 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kubernetes v1.21.0-rc.0/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4= +k8s.io/kubernetes v1.21.1/go.mod h1:ef++isEL1PW0taH6z7DXrSztPglrZ7jQhyvcMEtm0gQ= k8s.io/system-validators v1.4.0/go.mod 
h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= @@ -1007,8 +1028,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= @@ -1016,8 +1037,9 @@ sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIM sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 
h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/k8s.io/component-helpers/apimachinery/lease/controller.go b/vendor/k8s.io/component-helpers/apimachinery/lease/controller.go index 3b31c80a7a03..517351a3b4da 100644 --- a/vendor/k8s.io/component-helpers/apimachinery/lease/controller.go +++ b/vendor/k8s.io/component-helpers/apimachinery/lease/controller.go @@ -95,7 +95,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { klog.Infof("lease controller has nil lease client, will not claim or renew leases") return } - wait.Until(c.sync, c.renewInterval, stopCh) + wait.JitterUntil(c.sync, c.renewInterval, 0.04, true, stopCh) } func (c *controller) sync() { diff --git a/vendor/k8s.io/csi-translation-lib/go.mod b/vendor/k8s.io/csi-translation-lib/go.mod index a590b4602049..51983555dda8 100644 --- a/vendor/k8s.io/csi-translation-lib/go.mod +++ b/vendor/k8s.io/csi-translation-lib/go.mod @@ -6,9 +6,9 @@ go 1.16 require ( github.com/stretchr/testify v1.6.1 - k8s.io/api v0.21.0-rc.0 - k8s.io/apimachinery v0.21.0-rc.0 - k8s.io/klog/v2 v2.8.0 + k8s.io/api v0.21.1 + k8s.io/apimachinery v0.21.1 + k8s.io/klog/v2 v2.9.0 ) replace ( diff --git a/vendor/k8s.io/csi-translation-lib/go.sum b/vendor/k8s.io/csi-translation-lib/go.sum index 05c961d1a1bd..04fe32b54369 100644 --- a/vendor/k8s.io/csi-translation-lib/go.sum +++ b/vendor/k8s.io/csi-translation-lib/go.sum @@ -1,11 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod 
h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -13,8 +9,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -29,26 +23,13 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.19.5/go.mod 
h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -91,7 +72,6 @@ github.com/openshift/ginkgo v4.7.0-origin.0+incompatible/go.mod h1:8METQ1gDhl0KW github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -106,16 +86,9 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -126,13 +99,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -145,13 +115,9 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -161,23 +127,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf 
v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -194,15 +145,13 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod 
h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 25483fad1388..1e187f76354b 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -284,6 +284,7 @@ func (m *moduleSpec) Get() interface{} { var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") +// Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { var filter []modulePat @@ -362,6 +363,7 @@ func (t *traceLocation) Get() interface{} { var errTraceSyntax = errors.New("syntax error: expect file.go:234") +// Set will sets backtrace value // Syntax: -log_backtrace_at=gopherflakes.go:234 // Note that unlike vmodule the file extension is included here. func (t *traceLocation) Set(value string) error { @@ -708,7 +710,7 @@ func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args args = filter.Filter(args) } fmt.Fprintln(buf, args...) 
- l.output(s, logr, buf, file, line, false) + l.output(s, logr, buf, 0 /* depth */, file, line, false) } func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { @@ -730,7 +732,7 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, de if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logr, buf, depth, file, line, false) } func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { @@ -748,7 +750,7 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, false) + l.output(s, logr, buf, 0 /* depth */, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If @@ -769,7 +771,7 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, file, line, alsoToStderr) + l.output(s, logr, buf, 2 /* depth */, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. @@ -778,7 +780,7 @@ func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth msg, keysAndValues = filter.FilterS(msg, keysAndValues) } if loggr != nil { - loggr.Error(err, msg, keysAndValues...) + logr.WithCallDepth(loggr, depth+2).Error(err, msg, keysAndValues...) return } l.printS(err, errorLog, depth+1, msg, keysAndValues...) @@ -790,7 +792,7 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg str msg, keysAndValues = filter.FilterS(msg, keysAndValues) } if loggr != nil { - loggr.Info(msg, keysAndValues...) + logr.WithCallDepth(loggr, depth+2).Info(msg, keysAndValues...) return } l.printS(nil, infoLog, depth+1, msg, keysAndValues...) 
@@ -825,6 +827,8 @@ func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { switch v.(type) { case string, error: b.WriteString(fmt.Sprintf("%s=%q", k, v)) + case []byte: + b.WriteString(fmt.Sprintf("%s=%+q", k, v)) default: if _, ok := v.(fmt.Stringer); ok { b.WriteString(fmt.Sprintf("%s=%q", k, v)) @@ -855,12 +859,13 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetLogger will set the backing logr implementation for klog. // If set, all log lines will be suppressed from the regular Output, and // redirected to the logr implementation. -// All log lines include the 'severity', 'file' and 'line' values attached as -// structured logging values. // Use as: // ... // klog.SetLogger(zapr.NewLogger(zapLog)) func SetLogger(logr logr.Logger) { + logging.mu.Lock() + defer logging.mu.Unlock() + logging.logr = logr } @@ -899,7 +904,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { l.mu.Lock() if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { @@ -911,9 +916,9 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == errorLog { - l.logr.Error(nil, string(data)) + logr.WithCallDepth(l.logr, depth+3).Error(nil, string(data)) } else { - log.Info(string(data)) + logr.WithCallDepth(log, depth+3).Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index 8349b1d0ca77..33d5addeabdd 100644 --- 
a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -257,6 +257,7 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg (func() ([]byte, []byte))(s.proxyCurrentCertKeyContent), s.serviceResolver, c.GenericConfig.EgressSelector, + c.GenericConfig.HasBeenReadySignal(), ) if err != nil { return nil, err diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index 47b4f3ed248b..90a9905edcdf 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/httpstream/spdy" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/proxy" auditinternal "k8s.io/apiserver/pkg/apis/audit" @@ -169,23 +168,21 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } - // we need to wrap the roundtripper in another roundtripper which will apply the front proxy headers - proxyRoundTripper, upgrade, err := maybeWrapForConnectionUpgrades(handlingInfo.restConfig, handlingInfo.proxyRoundTripper, req) - if err != nil { - proxyError(w, req, err.Error(), http.StatusInternalServerError) - return - } + proxyRoundTripper := handlingInfo.proxyRoundTripper + upgrade := httpstream.IsUpgradeRequest(req) + proxyRoundTripper = transport.NewAuthProxyRoundTripper(user.GetName(), user.GetGroups(), user.GetExtra(), proxyRoundTripper) - // if we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does - // NOT use the roundtripper. Its a direct call that bypasses the round tripper. This means that we have to - // attach the "correct" user headers to the request ahead of time. 
After the initial upgrade, we'll be back - // at the roundtripper flow, so we only have to muck with this request, but we do have to do it. + // If we are upgrading, then the upgrade path tries to use this request with the TLS config we provide, but it does + // NOT use the proxyRoundTripper. It's a direct dial that bypasses the proxyRoundTripper. This means that we have to + // attach the "correct" user headers to the request ahead of time. if upgrade { transport.SetAuthProxyHeaders(newReq, user.GetName(), user.GetGroups(), user.GetExtra()) } handler := proxy.NewUpgradeAwareHandler(location, proxyRoundTripper, true, upgrade, &responder{w: w}) + handler.InterceptRedirects = utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) + handler.RequireSameHostRedirects = utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidateProxyRedirects) handler.ServeHTTP(w, newReq) } @@ -220,27 +217,6 @@ func newRequestForProxy(location *url.URL, req *http.Request) (*http.Request, co return newReq, cancelFn } -// maybeWrapForConnectionUpgrades wraps the roundtripper for upgrades. 
The bool indicates if it was wrapped -func maybeWrapForConnectionUpgrades(restConfig *restclient.Config, rt http.RoundTripper, req *http.Request) (http.RoundTripper, bool, error) { - if !httpstream.IsUpgradeRequest(req) { - return rt, false, nil - } - - tlsConfig, err := restclient.TLSConfigFor(restConfig) - if err != nil { - return nil, true, err - } - followRedirects := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) - requireSameHostRedirects := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidateProxyRedirects) - upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) - wrappedRT, err := restclient.HTTPWrappersForConfig(restConfig, upgradeRoundTripper) - if err != nil { - return nil, true, err - } - - return wrappedRT, true, nil -} - // responder implements rest.Responder for assisting a connector in writing objects or errors. type responder struct { w http.ResponseWriter diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 2b112b23dfb4..e7c7aa556f8b 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -100,6 +100,9 @@ type AvailableConditionController struct { // metrics registered into legacy registry metrics *availabilityMetrics + + // hasBeenReady is signaled when the readyz endpoint succeeds for the first time. 
+ hasBeenReady <-chan struct{} } type tlsTransportCache struct { @@ -152,6 +155,7 @@ func NewAvailableConditionController( proxyCurrentCertKeyContent certKeyFunc, serviceResolver ServiceResolver, egressSelector *egressselector.EgressSelector, + hasBeenReady <-chan struct{}, ) (*AvailableConditionController, error) { c := &AvailableConditionController{ apiServiceClient: apiServiceClient, @@ -171,6 +175,7 @@ func NewAvailableConditionController( proxyCurrentCertKeyContent: proxyCurrentCertKeyContent, tlsCache: &tlsTransportCache{transports: make(map[tlsCacheKey]http.RoundTripper)}, metrics: newAvailabilityMetrics(), + hasBeenReady: hasBeenReady, } if egressSelector != nil { @@ -233,6 +238,18 @@ func (c *AvailableConditionController) sync(key string) error { return err } + // the availability checks depend on fully initialized SDN + // OpenShift carries a few reachability checks that affect /readyz protocol + // record availability of the server so that we can + // skip posting failures to avoid getting false positives until the server becomes ready + hasBeenReady := false + select { + case <-c.hasBeenReady: + hasBeenReady = true + default: + // continue, we will skip posting only potential failures + } + // if a particular transport was specified, use that otherwise build one // construct an http client that will ignore TLS verification (if someone owns the network and messes with your status // that's not so bad) and sets a very short timeout. 
This is a best effort GET that provides no additional information @@ -427,6 +444,11 @@ func (c *AvailableConditionController) sync(key string) error { } if lastError != nil { + if !hasBeenReady { + // returning an error will requeue the item in an exponential fashion + return fmt.Errorf("the server hasn't been ready yet, skipping updating availability of the aggreaged API until the server becomes ready to avoid false positives, lastError = %v", lastError) + } + availableCondition.Status = apiregistrationv1.ConditionFalse availableCondition.Reason = "FailedDiscoveryCheck" availableCondition.Message = lastError.Error() diff --git a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go index 5395596fc343..7f5a275b01a0 100644 --- a/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go +++ b/vendor/k8s.io/kubectl/pkg/polymorphichelpers/logsforobject.go @@ -76,31 +76,34 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt case *corev1.Pod: // if allContainers is true, then we're going to locate all containers and then iterate through them. At that point, "allContainers" is false if !allContainers { + currOpts := new(corev1.PodLogOptions) + if opts != nil { + opts.DeepCopyInto(currOpts) + } // in case the "kubectl.kubernetes.io/default-container" annotation is present, we preset the opts.Containers to default to selected // container. This gives users ability to preselect the most interesting container in pod. 
- if annotations := t.GetAnnotations(); annotations != nil && len(opts.Container) == 0 { - var containerName string + if annotations := t.GetAnnotations(); annotations != nil && currOpts.Container == "" { + var defaultContainer string if len(annotations[podcmd.DefaultContainerAnnotationName]) > 0 { - containerName = annotations[podcmd.DefaultContainerAnnotationName] + defaultContainer = annotations[podcmd.DefaultContainerAnnotationName] } else if len(annotations[defaultLogsContainerAnnotationName]) > 0 { // Only log deprecation if we have only the old annotation. This allows users to // set both to support multiple versions of kubectl; if they are setting both // they must already know it is deprecated, so we don't need to add noisy // warnings. - containerName = annotations[defaultLogsContainerAnnotationName] + defaultContainer = annotations[defaultLogsContainerAnnotationName] fmt.Fprintf(os.Stderr, "Using deprecated annotation `kubectl.kubernetes.io/default-logs-container` in pod/%v. Please use `kubectl.kubernetes.io/default-container` instead\n", t.Name) } - if len(containerName) > 0 { - if exists, _ := podcmd.FindContainerByName(t, containerName); exists != nil { - opts.Container = containerName + if len(defaultContainer) > 0 { + if exists, _ := podcmd.FindContainerByName(t, defaultContainer); exists == nil { + fmt.Fprintf(os.Stderr, "Default container name %q not found in pod %s\n", defaultContainer, t.Name) } else { - fmt.Fprintf(os.Stderr, "Default container name %q not found in a pod\n", containerName) + currOpts.Container = defaultContainer } } } - var containerName string - if opts == nil || len(opts.Container) == 0 { + if currOpts.Container == "" { // We don't know container name. In this case we expect only one container to be present in the pod (ignoring InitContainers). // If there is more than one container, we should return an error showing all container names. 
if len(t.Spec.Containers) != 1 { @@ -117,14 +120,12 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt return nil, errors.New(err) } - containerName = t.Spec.Containers[0].Name - } else { - containerName = opts.Container + currOpts.Container = t.Spec.Containers[0].Name } - container, fieldPath := podcmd.FindContainerByName(t, containerName) + container, fieldPath := podcmd.FindContainerByName(t, currOpts.Container) if container == nil { - return nil, fmt.Errorf("container %s is not valid for pod %s", opts.Container, t.Name) + return nil, fmt.Errorf("container %s is not valid for pod %s", currOpts.Container, t.Name) } ref, err := reference.GetPartialReference(scheme.Scheme, t, fieldPath) if err != nil { @@ -132,7 +133,7 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt } ret := make(map[corev1.ObjectReference]rest.ResponseWrapper, 1) - ret[*ref] = clientset.Pods(t.Namespace).GetLogs(t.Name, opts) + ret[*ref] = clientset.Pods(t.Namespace).GetLogs(t.Name, currOpts) return ret, nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index f7b57b2582bc..7d29f8c0ce39 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -22,11 +22,14 @@ import ( "errors" "fmt" "io/ioutil" + "net" "net/http" "os" "strings" "time" + utilnode "k8s.io/kubernetes/pkg/util/node" + "github.com/fsnotify/fsnotify" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -83,6 +86,7 @@ import ( utilipvs "k8s.io/kubernetes/pkg/util/ipvs" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/utils/exec" + utilsnet "k8s.io/utils/net" utilpointer "k8s.io/utils/pointer" ) @@ -814,3 +818,36 @@ func (s *ProxyServer) CleanupAndExit() error { return nil } + +// detectNodeIP returns the nodeIP used by the proxier +// The order of precedence is: +// 1. config.bindAddress if bindAddress is not 0.0.0.0 or :: +// 2. 
the primary IP from the Node object, if set +// 3. if no IP is found it defaults to 127.0.0.1 and IPv4 +func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.IP { + nodeIP := net.ParseIP(bindAddress) + if nodeIP.IsUnspecified() { + nodeIP = utilnode.GetNodeIP(client, hostname) + } + if nodeIP == nil { + klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag") + nodeIP = net.ParseIP("127.0.0.1") + } + return nodeIP +} + +// nodeIPTuple takes an addresses and return a tuple (ipv4,ipv6) +// The returned tuple is guaranteed to have the order (ipv4,ipv6). The address NOT of the passed address +// will have "any" address (0.0.0.0 or ::) inserted. +func nodeIPTuple(bindAddress string) [2]net.IP { + nodes := [2]net.IP{net.IPv4zero, net.IPv6zero} + + adr := net.ParseIP(bindAddress) + if utilsnet.IsIPv6(adr) { + nodes[1] = adr + } else { + nodes[0] = adr + } + + return nodes +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go index 011ff019f48c..4b8f9da9e77b 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go @@ -428,23 +428,6 @@ func waitForPodCIDR(client clientset.Interface, nodeName string) (*v1.Node, erro return nil, fmt.Errorf("event object not of type node") } -// detectNodeIP returns the nodeIP used by the proxier -// The order of precedence is: -// 1. config.bindAddress if bindAddress is not 0.0.0.0 or :: -// 2. the primary IP from the Node object, if set -// 3. 
if no IP is found it defaults to 127.0.0.1 and IPv4 -func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.IP { - nodeIP := net.ParseIP(bindAddress) - if nodeIP.IsUnspecified() { - nodeIP = utilnode.GetNodeIP(client, hostname) - } - if nodeIP == nil { - klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag") - nodeIP = net.ParseIP("127.0.0.1") - } - return nodeIP -} - func detectNumCPU() int { // try get numCPU from /sys firstly due to a known issue (https://github.com/kubernetes/kubernetes/issues/99225) _, numCPU, err := machine.GetTopology(sysfs.NewRealSysFs()) @@ -570,22 +553,6 @@ func cidrTuple(cidrList string) [2]string { return cidrs } -// nodeIPTuple takes an addresses and return a tuple (ipv4,ipv6) -// The returned tuple is guaranteed to have the order (ipv4,ipv6). The address NOT of the passed address -// will have "any" address (0.0.0.0 or ::) inserted. -func nodeIPTuple(bindAddress string) [2]net.IP { - nodes := [2]net.IP{net.IPv4zero, net.IPv6zero} - - adr := net.ParseIP(bindAddress) - if utilsnet.IsIPv6(adr) { - nodes[1] = adr - } else { - nodes[0] = adr - } - - return nodes -} - func getProxyMode(proxyMode string, canUseIPVS bool, kcompat iptables.KernelCompatTester) string { switch proxyMode { case proxyModeUserspace: diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go index 2a625c532138..912af638fd51 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go @@ -47,7 +47,6 @@ import ( utilnetsh "k8s.io/kubernetes/pkg/util/netsh" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/utils/exec" - utilsnet "k8s.io/utils/net" ) // NewProxyServer returns a new ProxyServer. 
@@ -85,6 +84,9 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi if err != nil { return nil, err } + nodeIP := detectNodeIP(client, hostname, config.BindAddress) + klog.InfoS("Detected node IP", "IP", nodeIP.String()) + eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(proxyconfigscheme.Scheme, v1.EventSource{Component: "kube-proxy", Host: hostname}) @@ -101,12 +103,11 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi } var proxier proxy.Provider - proxyMode := getProxyMode(string(config.Mode), winkernel.WindowsKernelCompatTester{}) + dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{}) if proxyMode == proxyModeKernelspace { klog.V(0).Info("Using Kernelspace Proxier.") - isIPv6DualStackEnabled := utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) - if isIPv6DualStackEnabled { + if dualStackMode { klog.V(0).Info("creating dualStackProxier for Windows kernel.") proxier, err = winkernel.NewDualStackProxier( @@ -130,7 +131,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi int(*config.IPTables.MasqueradeBit), config.ClusterCIDR, hostname, - utilnode.GetNodeIP(client, hostname), + nodeIP, recorder, healthzServer, config.Winkernel, @@ -183,6 +184,10 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi }, nil } +func getDualStackMode(networkname string, compatTester winkernel.StackCompatTester) bool { + return compatTester.DualStackCompatible(networkname) +} + func getProxyMode(proxyMode string, kcompat winkernel.KernelCompatTester) string { if proxyMode == proxyModeKernelspace { return tryWinKernelSpaceProxy(kcompat) @@ -211,19 +216,3 @@ func tryWinKernelSpaceProxy(kcompat winkernel.KernelCompatTester) string { klog.V(1).Infof("Can't use winkernel proxy, using userspace proxier") return proxyModeUserspace } - -// nodeIPTuple takes an addresses and 
return a tuple (ipv4,ipv6) -// The returned tuple is guaranteed to have the order (ipv4,ipv6). The address NOT of the passed address -// will have "any" address (0.0.0.0 or ::) inserted. -func nodeIPTuple(bindAddress string) [2]net.IP { - nodes := [2]net.IP{net.IPv4zero, net.IPv6zero} - - adr := net.ParseIP(bindAddress) - if utilsnet.IsIPv6(adr) { - nodes[1] = adr - } else { - nodes[0] = adr - } - - return nodes -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go index c22e24d5312f..3eef6f13d9e2 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server_linux.go @@ -29,6 +29,7 @@ func watchForLockfileContention(path string, done chan struct{}) error { } if err = watcher.AddWatch(path, inotify.InOpen|inotify.InDeleteSelf); err != nil { klog.ErrorS(err, "Unable to watch lockfile") + watcher.Close() return err } go func() { @@ -39,6 +40,7 @@ func watchForLockfileContention(path string, done chan struct{}) error { klog.ErrorS(err, "inotify watcher error") } close(done) + watcher.Close() }() return nil } diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go index 7885ebe6aab6..085dfb8cf985 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/managementcpusoverride/admission.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "reflect" "strings" "time" @@ -180,12 +181,23 @@ func (a *managementCPUsOverride) Admit(ctx context.Context, attr admission.Attri return admission.NewForbidden(attr, err) // can happen due to informer latency } + // the infrastructure status is empty, so we can not decide the cluster 
type + if reflect.DeepEqual(clusterInfra.Status, configv1.InfrastructureStatus{}) { + return admission.NewForbidden(attr, fmt.Errorf("%s infrastructure resource has empty status", PluginName)) + } + + // the infrastructure status is not empty, but topology related fields do not have any values indicates that + // the cluster is during the roll-back process to the version that does not support the topology fields + // the upgrade to 4.8 handled by the CR defaulting + if clusterInfra.Status.ControlPlaneTopology == "" && clusterInfra.Status.InfrastructureTopology == "" { + return nil + } + // not the SNO cluster, skip mutation // TODO: currently we supports only SNO use case because we have not yet worked out the best approach to determining whether the feature // should be on or off in a multi-node cluster, and computing that state incorrectly could lead to breaking running clusters. if clusterInfra.Status.InfrastructureTopology != configv1.SingleReplicaTopologyMode || clusterInfra.Status.ControlPlaneTopology != configv1.SingleReplicaTopologyMode { - pod.Annotations[workloadAdmissionWarning] = "only single-node clusters support workload partitioning" return nil } diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go index c95cf9b35885..5a230588fdeb 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go @@ -11,6 +11,7 @@ import ( "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns" 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth" "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/project" @@ -36,6 +37,9 @@ var AllCustomResourceValidators = []string{ rolebindingrestriction.PluginName, network.PluginName, + // the kubecontrollermanager operator resource has to exist in order to run deployments to deploy admission webhooks. + kubecontrollermanager.PluginName, + // this one is special because we don't work without it. securitycontextconstraints.DefaultingPluginName, } @@ -51,6 +55,7 @@ func RegisterCustomResourceValidation(plugins *admission.Plugins) { project.Register(plugins) config.Register(plugins) scheduler.Register(plugins) + kubecontrollermanager.Register(plugins) // This plugin validates the quota.openshift.io/v1 ClusterResourceQuota resources. // NOTE: This is only allowed because it is required to get a running control plane operator. 
diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go new file mode 100644 index 000000000000..be688a4fa96f --- /dev/null +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager/validate_kubecontrollermanager.go @@ -0,0 +1,113 @@ +package kubecontrollermanager + +import ( + "fmt" + "io" + + operatorv1 "github.com/openshift/api/operator/v1" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "operator.openshift.io/ValidateKubeControllerManager" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + operatorv1.Resource("kubecontrollermanagers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + operatorv1.GroupVersion.WithKind("KubeControllerManager"): kubeControllerManagerV1{}, + }) + }) +} + +func toKubeControllerManager(uncastObj runtime.Object) (*operatorv1.KubeControllerManager, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*operatorv1.KubeControllerManager) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"KubeControllerManager"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), 
[]string{"operator.openshift.io/v1"})) + } + + return obj, nil +} + +type kubeControllerManagerV1 struct { +} + +func validateKubeControllerManagerSpecCreate(spec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on create, we allow anything + return allErrs +} + +func validateKubeControllerManagerSpecUpdate(spec, oldSpec operatorv1.KubeControllerManagerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on update, fail if we go from secure to insecure + if oldSpec.UseMoreSecureServiceCA && !spec.UseMoreSecureServiceCA { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.useMoreSecureServiceCA"), "once enabled, the more secure service-ca.crt cannot be disabled")) + } + + return allErrs +} + +func (kubeControllerManagerV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecCreate(obj.Spec)...) + + return allErrs +} + +func (kubeControllerManagerV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toKubeControllerManager(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toKubeControllerManager(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateKubeControllerManagerSpecUpdate(obj.Spec, oldObj.Spec)...) 
+ + return allErrs +} + +func (kubeControllerManagerV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toKubeControllerManager(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toKubeControllerManager(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go index 6b0990170015..8df0b0f13341 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/apiaccess_count_controller.go @@ -8,8 +8,8 @@ import ( "time" apiv1 "github.com/openshift/api/apiserver/v1" - - apiclientv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + apiv1client "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -18,7 +18,7 @@ import ( ) // NewController returns a controller -func NewController(client apiclientv1.APIRequestCountInterface, nodeName string) *controller { +func NewController(client apiv1client.APIRequestCountInterface, nodeName string) *controller { ret := &controller{ client: client, nodeName: nodeName, @@ -36,9 +36,10 @@ type APIRequestLogger interface { } type controller struct { - client apiclientv1.APIRequestCountInterface + client 
apiv1client.APIRequestCountInterface nodeName string updatePeriod time.Duration + loadOnce sync.Once requestCountLock sync.RWMutex requestCounts *apiRequestCounts @@ -58,7 +59,7 @@ func (c *controller) IsDeprecated(resource, version, group string) bool { func (c *controller) LogRequest(resource schema.GroupVersionResource, timestamp time.Time, user, userAgent, verb string) { c.requestCountLock.RLock() defer c.requestCountLock.RUnlock() - // we snip user agents to reduce cardinality and unique keys. For well behaved agents, we see useragents about like + // we snip user agents to reduce cardinality and unique keys. For well-behaved agents, we see useragents about like // kube-controller-manager/v1.21.0 (linux/amd64) kubernetes/743bd58/kube-controller-manager // so we will snip at the first space. snippedUserAgent := userAgent @@ -94,24 +95,43 @@ func (c *controller) Start(stop <-chan struct{}) { }() // write out logs every c.updatePeriod - go wait.NonSlidingUntilWithContext(ctx, c.persistRequestCountForAllResources, c.updatePeriod) + go wait.NonSlidingUntilWithContext(ctx, c.sync, c.updatePeriod) +} +func (c *controller) sync(ctx context.Context) { + currentHour := time.Now().Hour() + c.persistRequestCountForAllResources(ctx, currentHour) } -func (c *controller) persistRequestCountForAllResources(ctx context.Context) { - klog.V(2).Infof("updating top APIRequest counts") - defer klog.V(2).Infof("finished updating top APIRequest counts") +func (c *controller) persistRequestCountForAllResources(ctx context.Context, currentHour int) { + klog.V(4).Infof("updating top APIRequest counts") + defer klog.V(4).Infof("finished updating top APIRequest counts") // get the current count to persist, start a new in-memory count countsToPersist := c.resetRequestCount() // remove stale data - expiredHour := (time.Now().Hour() + 1) % 24 - currentHour := time.Now().Hour() + expiredHour := (currentHour + 1) % 24 countsToPersist.ExpireOldestCounts(expiredHour) // when this function 
returns, add any remaining counts back to the total to be retried for update defer c.requestCounts.Add(countsToPersist) + // Add resources that have an existing APIRequestCount so that the current and hourly logs + // continue to rotate even if the resource has not had a request since the last restart. + c.loadOnce.Do(func() { + // As resources are never fully removed from countsToPersist, we only need to do this once. + // After the request counts have been persisted, the resources will be added "back" to the + // in memory counts (c.requestCounts, see defer statement above). + arcs, err := c.client.List(ctx, metav1.ListOptions{}) + if err != nil { + runtime.HandleError(err) // oh well, we tried + return + } + for _, arc := range arcs.Items { + countsToPersist.Resource(apiNameToResource(arc.Name)) + } + }) + var wg sync.WaitGroup for gvr := range countsToPersist.resourceToRequestCount { resourceCount := countsToPersist.Resource(gvr) @@ -127,13 +147,14 @@ func (c *controller) persistRequestCountForAllResources(ctx context.Context) { func (c *controller) persistRequestCountForResource(ctx context.Context, wg *sync.WaitGroup, currentHour, expiredHour int, localResourceCount *resourceRequestCounts) { defer wg.Done() - klog.V(2).Infof("updating top %v APIRequest counts", localResourceCount.resource) - defer klog.V(2).Infof("finished updating top %v APIRequest counts", localResourceCount.resource) + klog.V(4).Infof("updating top %v APIRequest counts", localResourceCount.resource) + defer klog.V(4).Infof("finished updating top %v APIRequest counts", localResourceCount.resource) status, _, err := v1helpers.ApplyStatus( ctx, c.client, resourceToAPIName(localResourceCount.resource), + nodeStatusDefaulter(c.nodeName, currentHour, expiredHour, localResourceCount.resource), SetRequestCountsForNode(c.nodeName, currentHour, expiredHour, localResourceCount), ) if err != nil { @@ -161,10 +182,10 @@ func removePersistedRequestCounts(nodeName string, currentHour int, persistedSta if 
persistedNodeCount.NodeName != nodeName { continue } - for _, peristedUserCount := range persistedNodeCount.ByUser { + for _, persistedUserCount := range persistedNodeCount.ByUser { userKey := userKey{ - user: peristedUserCount.UserName, - userAgent: peristedUserCount.UserAgent, + user: persistedUserCount.UserName, + userAgent: persistedUserCount.UserAgent, } localResourceCount.Hour(currentHour).RemoveUser(userKey) } @@ -187,3 +208,12 @@ func resourceToAPIName(resource schema.GroupVersionResource) string { } return apiName } + +func apiNameToResource(name string) schema.GroupVersionResource { + segments := strings.SplitN(name, ".", 3) + result := schema.GroupVersionResource{Resource: segments[0], Version: segments[1]} + if len(segments) > 2 { + result.Group = segments[2] + } + return result +} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/request_counts.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/request_counts.go index e581576277a9..3a4a04a46b63 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/request_counts.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/request_counts.go @@ -237,7 +237,7 @@ func (c *resourceRequestCounts) String() string { type hourlyRequestCounts struct { lock sync.RWMutex - // countToSupress is the number of requests to remove from the count to avoid double counting in persistence + // countToSuppress is the number of requests to remove from the count to avoid double counting in persistence // TODO I think I'd like this in look-aside data, but I don't see an easy way to plumb it. 
countToSuppress int64 usersToRequestCounts map[userKey]*userRequestCounts diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/update_func.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/update_func.go index 3723c55bb0af..4bc1fe0aa26a 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/update_func.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/update_func.go @@ -9,7 +9,7 @@ import ( "k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/v1helpers" ) -// IncrementRequestCounts add additional api request counts to the log. +// SetRequestCountsForNode add additional api request counts to the log. // countsToPersist must not be mutated func SetRequestCountsForNode(nodeName string, currentHour, expiredHour int, countsToPersist *resourceRequestCounts) v1helpers.UpdateStatusFunc { return func(maxNumUsers int, status *apiv1.APIRequestCountStatus) { @@ -31,6 +31,10 @@ func SetRequestCountsForNode(nodeName string, currentHour, expiredHour int, coun } } +func nodeStatusDefaulter(nodeName string, currentHour, expiredHour int, resource schema.GroupVersionResource) v1helpers.UpdateStatusFunc { + return SetRequestCountsForNode(nodeName, currentHour, expiredHour, newResourceRequestCounts(resource)) +} + func setRequestCountsForNode(status *apiv1.APIRequestCountStatus, nodeName string, currentHour, expiredHour int, hourlyNodeRequests []apiv1.PerNodeAPIRequestLog) *apiv1.APIRequestCountStatus { newStatus := status.DeepCopy() newStatus.Last24h = []apiv1.PerResourceAPIRequestLog{} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/v1helpers/helpers.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/v1helpers/helpers.go index 17d376b97852..e5ebd7bc6259 100644 --- 
a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/v1helpers/helpers.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/filters/deprecatedapirequest/v1helpers/helpers.go @@ -3,9 +3,8 @@ package v1helpers import ( "context" - apiclientv1 "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" - apiv1 "github.com/openshift/api/apiserver/v1" + apiv1client "github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,43 +13,57 @@ import ( type UpdateStatusFunc func(maxNumUsers int, status *apiv1.APIRequestCountStatus) -func ApplyStatus(ctx context.Context, client apiclientv1.APIRequestCountInterface, name string, updateFuncs ...UpdateStatusFunc) (*apiv1.APIRequestCountStatus, bool, error) { +func ApplyStatus(ctx context.Context, client apiv1client.APIRequestCountInterface, name string, statusDefaulter UpdateStatusFunc, updateFuncs ...UpdateStatusFunc) (*apiv1.APIRequestCountStatus, bool, error) { updated := false var updatedStatus *apiv1.APIRequestCountStatus err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - check, err := client.Get(ctx, name, metav1.GetOptions{}) + existingOrDefaultAPIRequestCount, err := client.Get(ctx, name, metav1.GetOptions{}) if errors.IsNotFound(err) { - // on a not found, let's create this thing. - requestCount := &apiv1.APIRequestCount{ + // APIRequestCount might have been purposely deleted. We will + // try to create it again further below if there is a need to. 
+ existingOrDefaultAPIRequestCount = &apiv1.APIRequestCount{ ObjectMeta: metav1.ObjectMeta{Name: name}, - Spec: apiv1.APIRequestCountSpec{ - NumberOfUsersToReport: 10, - }, + Spec: apiv1.APIRequestCountSpec{NumberOfUsersToReport: 10}, } - check, err = client.Create(ctx, requestCount, metav1.CreateOptions{}) - } - if err != nil { + // make sure the status doesn't result in a diff on a no-op. + statusDefaulter(10, &existingOrDefaultAPIRequestCount.Status) + } else if err != nil { return err } - oldStatus := check.Status + oldStatus := existingOrDefaultAPIRequestCount.Status newStatus := oldStatus.DeepCopy() for _, update := range updateFuncs { - update(int(check.Spec.NumberOfUsersToReport), newStatus) + update(int(existingOrDefaultAPIRequestCount.Spec.NumberOfUsersToReport), newStatus) } - if equality.Semantic.DeepEqual(oldStatus, newStatus) { + if equality.Semantic.DeepEqual(&oldStatus, newStatus) { updatedStatus = newStatus return nil } - check, err = client.Get(ctx, name, metav1.GetOptions{}) + + // At this point the status has been semantically changed by the updateFuncs, + // possibly due to new requests, hourly log expiration, and so on. + + existingAPIRequestCount, err := client.Get(ctx, name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + // APIRequestCount might have been purposely deleted, but new requests + // have come in, so let's re-create the APIRequestCount resource. 
+ newAPIRequestCount := &apiv1.APIRequestCount{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apiv1.APIRequestCountSpec{ + NumberOfUsersToReport: 10, + }, + } + existingAPIRequestCount, err = client.Create(ctx, newAPIRequestCount, metav1.CreateOptions{}) + } if err != nil { return err } - check.Status = *newStatus - updatedCheck, err := client.UpdateStatus(ctx, check, metav1.UpdateOptions{}) + existingAPIRequestCount.Status = *newStatus + updatedAPIRequestCount, err := client.UpdateStatus(ctx, existingAPIRequestCount, metav1.UpdateOptions{}) if err != nil { return err } - updatedStatus = &updatedCheck.Status + updatedStatus = &updatedAPIRequestCount.Status updated = true return err }) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go index 0aa0bde368a6..3e7a8bb15cf3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go @@ -21,7 +21,7 @@ import ( "reflect" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -140,7 +140,7 @@ func legacyValidateEvent(event *core.Event) field.ErrorList { } } else { - if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem { + if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceDefault && event.Namespace != metav1.NamespaceSystem { allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) } if len(event.ReportingController) == 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index 4a85d23d39c8..5ea528560563 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -189,7 +189,7 @@ func Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking. func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *extensionsv1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error { if err := autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in, out, s); err != nil { - return nil + return err } if in.Backend != nil { out.DefaultBackend = &networking.IngressBackend{} @@ -202,7 +202,7 @@ func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *extensionsv1beta1 func Convert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *extensionsv1beta1.IngressSpec, s conversion.Scope) error { if err := autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in, out, s); err != nil { - return nil + return err } if in.DefaultBackend != nil { out.Backend = &extensionsv1beta1.IngressBackend{} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1beta1/conversion.go index 42df29a408d3..5f4553cfe05d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1beta1/conversion.go @@ -52,7 +52,7 @@ func Convert_networking_IngressBackend_To_v1beta1_IngressBackend(in *networking. 
} func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *v1beta1.IngressSpec, out *networking.IngressSpec, s conversion.Scope) error { if err := autoConvert_v1beta1_IngressSpec_To_networking_IngressSpec(in, out, s); err != nil { - return nil + return err } if in.Backend != nil { out.DefaultBackend = &networking.IngressBackend{} @@ -65,7 +65,7 @@ func Convert_v1beta1_IngressSpec_To_networking_IngressSpec(in *v1beta1.IngressSp func Convert_networking_IngressSpec_To_v1beta1_IngressSpec(in *networking.IngressSpec, out *v1beta1.IngressSpec, s conversion.Scope) error { if err := autoConvert_networking_IngressSpec_To_v1beta1_IngressSpec(in, out, s); err != nil { - return nil + return err } if in.DefaultBackend != nil { out.Backend = &v1beta1.IngressBackend{} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go index 1ad451bbfe2e..02e80c130d2c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics/metrics.go @@ -41,7 +41,7 @@ var ( Subsystem: VolumeSchedulerSubsystem, Name: "scheduling_duration_seconds", Help: "Volume scheduling stage latency (Deprecated since 1.19.0)", - Buckets: metrics.ExponentialBuckets(1000, 2, 15), + Buckets: metrics.ExponentialBuckets(0.001, 2, 15), StabilityLevel: metrics.ALPHA, DeprecatedVersion: "1.19.0", }, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index bd7415a945b7..181e5616a135 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -449,7 +449,8 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont Major: libcontainerdevices.Wildcard, }, }, - SkipDevices: true, + SkipDevices: true, + 
SkipFreezeOnSet: true, } if resourceConfig == nil { return resources diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go index 278f13eb08e2..92b36c2f9af8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/internal_container_lifecycle.go @@ -50,10 +50,7 @@ func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, containe } if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) { - err := i.topologyManager.AddContainer(pod, containerID) - if err != nil { - return err - } + i.topologyManager.AddContainer(pod, container, containerID) } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go index 063cac65a27d..1e7a831108dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go @@ -39,9 +39,8 @@ func (m *fakeManager) AddHintProvider(h HintProvider) { klog.InfoS("AddHintProvider", "hintProvider", h) } -func (m *fakeManager) AddContainer(pod *v1.Pod, containerID string) error { - klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerID", containerID) - return nil +func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { + klog.InfoS("AddContainer", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID) } func (m *fakeManager) RemoveContainer(containerID string) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go index af90663368a4..c5c6f36be972 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) @@ -43,7 +44,7 @@ type Scope interface { // wants to be consoluted with when making topology hints AddHintProvider(h HintProvider) // AddContainer adds pod to Manager for tracking - AddContainer(pod *v1.Pod, containerID string) error + AddContainer(pod *v1.Pod, container *v1.Container, containerID string) // RemoveContainer removes pod from Manager tracking RemoveContainer(containerID string) error // Store is the interface for storing pod topology hints @@ -60,8 +61,8 @@ type scope struct { hintProviders []HintProvider // Topology Manager Policy policy Policy - // Mapping of PodUID to ContainerID for Adding/Removing Pods from PodTopologyHints mapping - podMap map[string]string + // Mapping of (PodUid, ContainerName) to ContainerID for Adding/Removing Pods from PodTopologyHints mapping + podMap containermap.ContainerMap } func (s *scope) Name() string { @@ -94,12 +95,11 @@ func (s *scope) AddHintProvider(h HintProvider) { // It would be better to implement this function in topologymanager instead of scope // but topologymanager do not track mapping anymore -func (s *scope) AddContainer(pod *v1.Pod, containerID string) error { +func (s *scope) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { s.mutex.Lock() defer s.mutex.Unlock() - s.podMap[containerID] = string(pod.UID) - return nil + s.podMap.Add(string(pod.UID), container.Name, containerID) } // It would be better to implement this function in topologymanager instead of scope @@ -109,10 +109,18 @@ func (s *scope) RemoveContainer(containerID string) error { defer s.mutex.Unlock() klog.InfoS("RemoveContainer", "containerID", containerID) - podUIDString := s.podMap[containerID] - delete(s.podMap, containerID) - if _, exists := s.podTopologyHints[podUIDString]; exists { - 
delete(s.podTopologyHints[podUIDString], containerID) + // Get the podUID and containerName associated with the containerID to be removed and remove it + podUIDString, containerName, err := s.podMap.GetContainerRef(containerID) + if err != nil { + return nil + } + s.podMap.RemoveByContainerID(containerID) + + // In cases where a container has been restarted, it's possible that the same podUID and + // containerName are already associated with a *different* containerID now. Only remove + // the TopologyHints associated with that podUID and containerName if this is not true + if _, err := s.podMap.GetContainerID(podUIDString, containerName); err != nil { + delete(s.podTopologyHints[podUIDString], containerName) if len(s.podTopologyHints[podUIDString]) == 0 { delete(s.podTopologyHints, podUIDString) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go index e5d331e00e92..de45209625a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go @@ -19,6 +19,7 @@ package topologymanager import ( "k8s.io/api/core/v1" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) @@ -36,7 +37,7 @@ func NewContainerScope(policy Policy) Scope { name: containerTopologyScope, podTopologyHints: podTopologyHints{}, policy: policy, - podMap: make(map[string]string), + podMap: containermap.NewContainerMap(), }, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go index f4645bc4d768..9ccc6414dd9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go @@ -19,6 +19,7 @@ package topologymanager import ( 
"k8s.io/api/core/v1" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) @@ -36,7 +37,7 @@ func NewPodScope(policy Policy) Scope { name: podTopologyScope, podTopologyHints: podTopologyHints{}, policy: policy, - podMap: make(map[string]string), + podMap: containermap.NewContainerMap(), }, } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go index f1e435260dec..4f327e6efc04 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go @@ -46,7 +46,7 @@ type Manager interface { // wants to be consulted with when making topology hints AddHintProvider(HintProvider) // AddContainer adds pod to Manager for tracking - AddContainer(pod *v1.Pod, containerID string) error + AddContainer(pod *v1.Pod, container *v1.Container, containerID string) // RemoveContainer removes pod from Manager tracking RemoveContainer(containerID string) error // Store is the interface for storing pod topology hints @@ -175,8 +175,8 @@ func (m *manager) AddHintProvider(h HintProvider) { m.scope.AddHintProvider(h) } -func (m *manager) AddContainer(pod *v1.Pod, containerID string) error { - return m.scope.AddContainer(pod, containerID) +func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) { + m.scope.AddContainer(pod, container, containerID) } func (m *manager) RemoveContainer(containerID string) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 9396a5c62c47..29a122f585b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -18,6 +18,8 @@ package kubelet import ( "fmt" + "io/ioutil" + "path/filepath" "syscall" v1 
"k8s.io/api/core/v1" @@ -110,6 +112,57 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o return physicalMounter, nil } +// removeOrphanedPodVolumeDirs attempts to remove the pod volumes directory and +// its subdirectories. There should be no files left under normal conditions +// when this is called, so it effectively does a recursive rmdir instead of +// RemoveAll to ensure it only removes directories and not regular files. +func (kl *Kubelet) removeOrphanedPodVolumeDirs(uid types.UID) []error { + orphanVolumeErrors := []error{} + + // If there are still volume directories, attempt to rmdir them + volumePaths, err := kl.getPodVolumePathListFromDisk(uid) + if err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err)) + return orphanVolumeErrors + } + if len(volumePaths) > 0 { + for _, volumePath := range volumePaths { + if err := syscall.Rmdir(volumePath); err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err)) + } else { + klog.InfoS("Cleaned up orphaned volume from pod", "podUID", uid, "path", volumePath) + } + } + } + + // If there are any volume-subpaths, attempt to rmdir them + subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid) + if err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err)) + return orphanVolumeErrors + } + if len(subpathVolumePaths) > 0 { + for _, subpathVolumePath := range subpathVolumePaths { + if err := syscall.Rmdir(subpathVolumePath); err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err)) + } else { + klog.InfoS("Cleaned up orphaned 
volume subpath from pod", "podUID", uid, "path", subpathVolumePath) + } + } + } + + // Remove any remaining subdirectories along with the volumes directory itself. + // Fail if any regular files are encountered. + podVolDir := kl.getPodVolumesDir(uid) + if err := removeall.RemoveDirsOneFilesystem(kl.mounter, podVolDir); err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred when trying to remove the volumes dir", uid, err)) + } else { + klog.InfoS("Cleaned up orphaned pod volumes dir", "podUID", uid, "path", podVolDir) + } + + return orphanVolumeErrors +} + // cleanupOrphanedPodDirs removes the volumes of pods that should not be // running and that have no containers running. Note that we roll up logs here since it runs in the main loop. func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error { @@ -147,55 +200,48 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon continue } - allVolumesCleanedUp := true - - // If there are still volume directories, attempt to rmdir them - volumePaths, err := kl.getPodVolumePathListFromDisk(uid) - if err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err)) + // Attempt to remove the pod volumes directory and its subdirs + podVolumeErrors := kl.removeOrphanedPodVolumeDirs(uid) + if len(podVolumeErrors) > 0 { + orphanVolumeErrors = append(orphanVolumeErrors, podVolumeErrors...) + // Not all volumes were removed, so don't clean up the pod directory yet. It is likely + // that there are still mountpoints or files left which could cause removal of the pod + // directory to fail below. + // Errors for all removal operations have already been recorded, so don't add another + // one here. 
continue } - if len(volumePaths) > 0 { - for _, volumePath := range volumePaths { - if err := syscall.Rmdir(volumePath); err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err)) - allVolumesCleanedUp = false - } else { - klog.InfoS("Cleaned up orphaned volume from pod", "podUID", uid, "path", volumePath) - } - } - } - // If there are any volume-subpaths, attempt to rmdir them - subpathVolumePaths, err := kl.getPodVolumeSubpathListFromDisk(uid) + // Call RemoveAllOneFilesystem for remaining subdirs under the pod directory + podDir := kl.getPodDir(uid) + podSubdirs, err := ioutil.ReadDir(podDir) if err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err)) + klog.ErrorS(err, "Could not read directory", "path", podDir) + orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred during reading the pod dir from disk", uid, err)) continue } - if len(subpathVolumePaths) > 0 { - for _, subpathVolumePath := range subpathVolumePaths { - if err := syscall.Rmdir(subpathVolumePath); err != nil { - orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err)) - allVolumesCleanedUp = false - } else { - klog.InfoS("Cleaned up orphaned volume subpath from pod", "podUID", uid, "path", subpathVolumePath) - } + for _, podSubdir := range podSubdirs { + podSubdirName := podSubdir.Name() + podSubdirPath := filepath.Join(podDir, podSubdirName) + // Never attempt RemoveAllOneFilesystem on the volumes directory, + // as this could lead to data loss in some situations. The volumes + // directory should have been removed by removeOrphanedPodVolumeDirs. 
+ if podSubdirName == "volumes" { + err := fmt.Errorf("volumes subdir was found after it was removed") + klog.ErrorS(err, "Orphaned pod found, but failed to remove volumes subdir", "podUID", uid, "path", podSubdirPath) + continue + } + if err := removeall.RemoveAllOneFilesystem(kl.mounter, podSubdirPath); err != nil { + klog.ErrorS(err, "Failed to remove orphaned pod subdir", "podUID", uid, "path", podSubdirPath) + orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred when trying to remove subdir %q", uid, err, podSubdirPath)) } } - if !allVolumesCleanedUp { - // Not all volumes were removed, so don't clean up the pod directory yet. It is likely - // that there are still mountpoints left which could stall RemoveAllOneFilesystem which - // would otherwise be called below. - // Errors for all removal operations have already been recorded, so don't add another - // one here. - continue - } - + // Rmdir the pod dir, which should be empty if everything above was successful klog.V(3).InfoS("Orphaned pod found, removing", "podUID", uid) - if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil { + if err := syscall.Rmdir(podDir); err != nil { klog.ErrorS(err, "Failed to remove orphaned pod dir", "podUID", uid) - orphanRemovalErrors = append(orphanRemovalErrors, err) + orphanRemovalErrors = append(orphanRemovalErrors, fmt.Errorf("orphaned pod %q found, but error %v occurred when trying to remove the pod directory", uid, err)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go index 148922998aa3..b007fa0ab612 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/logs/container_log_manager.go @@ -138,9 +138,6 @@ func parseMaxSize(size string) (int64, error) { if !ok { return 0, fmt.Errorf("invalid max log size") } - if 
maxSize < 0 { - return 0, fmt.Errorf("negative max log size %d", maxSize) - } return maxSize, nil } @@ -161,6 +158,10 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa if err != nil { return nil, fmt.Errorf("failed to parse container log max size %q: %v", maxSize, err) } + // Negative number means to disable container log rotation + if parsedMaxSize < 0 { + return NewStubContainerLogManager(), nil + } // policy LogRotatePolicy return &containerLogManager{ osInterface: osInterface, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 0bf9ad9b4511..e5abd958c089 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -151,6 +151,7 @@ func ListenAndServeKubeletServer( s := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), Handler: &handler, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout ReadTimeout: 4 * 60 * time.Minute, WriteTimeout: 4 * 60 * time.Minute, MaxHeaderBytes: 1 << 20, @@ -178,6 +179,9 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st server := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), Handler: &s, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadTimeout: 4 * 60 * time.Minute, + WriteTimeout: 4 * 60 * time.Minute, MaxHeaderBytes: 1 << 20, } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go index ba0bfe35bc04..98bc63032363 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go @@ -95,7 +95,11 @@ func (i *objectCacheItem) setImmutable() { func (i 
*objectCacheItem) stopIfIdle(now time.Time, maxIdleTime time.Duration) bool { i.lock.Lock() defer i.lock.Unlock() - if !i.stopped && now.After(i.lastAccessTime.Add(maxIdleTime)) { + // Ensure that we don't try to stop not yet initialized reflector. + // In case of overloaded kube-apiserver, if the list request is + // already being processed, all the work would lost and would have + // to be retried. + if !i.stopped && i.store.hasSynced() && now.After(i.lastAccessTime.Add(maxIdleTime)) { return i.stopThreadUnsafe() } return false @@ -287,11 +291,14 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { if !exists { return nil, fmt.Errorf("object %q/%q not registered", namespace, name) } + // Record last access time independently if it succeeded or not. + // This protects from premature (racy) reflector closure. + item.setLastAccessTime(c.clock.Now()) + item.restartReflectorIfNeeded() if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil { return nil, fmt.Errorf("failed to sync %s cache: %v", c.groupResource.String(), err) } - item.setLastAccessTime(c.clock.Now()) obj, exists, err := item.store.GetByKey(c.key(namespace, name)) if err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 70a4c3d34097..8245db33ea08 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -504,6 +504,11 @@ func (asw *actualStateOfWorld) AddPodToVolume(markVolumeOpts operationexecutor.M // If pod exists, reset remountRequired value podObj.remountRequired = false podObj.volumeMountStateForPod = markVolumeOpts.VolumeMountState + if mounter != nil { + // The mounter stored in the object may have old information, + // use the newest one. 
+ podObj.mounter = mounter + } asw.attachedVolumes[volumeName].mountedPods[podName] = podObj return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 1d6b5551036e..e71a5e873071 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -843,6 +843,9 @@ func (proxier *Proxier) syncProxyRules() { for _, extIP := range svcInfo.ExternalIPStrings() { conntrackCleanupServiceIPs.Insert(extIP) } + for _, lbIP := range svcInfo.LoadBalancerIPStrings() { + conntrackCleanupServiceIPs.Insert(lbIP) + } nodePort := svcInfo.NodePort() if svcInfo.Protocol() == v1.ProtocolUDP && nodePort != 0 { klog.V(2).Infof("Stale %s service NodePort %v -> %d", strings.ToLower(string(svcInfo.Protocol())), svcPortName, nodePort) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/topology.go b/vendor/k8s.io/kubernetes/pkg/proxy/topology.go index 377cc65184cf..89223072361f 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/topology.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/topology.go @@ -81,12 +81,12 @@ func filterEndpointsWithHints(endpoints []Endpoint, hintsAnnotation string, node } } - if len(filteredEndpoints) > 0 { + if len(filteredEndpoints) == 0 { klog.Warningf("Skipping topology aware endpoint filtering since no hints were provided for zone %s", zone) - return filteredEndpoints + return endpoints } - return endpoints + return filteredEndpoints } // deprecatedTopologyFilter returns the appropriate endpoints based on the diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index 382d8023d736..4a63b1d46e20 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -37,7 +37,9 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + apiutil 
"k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" @@ -138,6 +140,101 @@ type remoteSubnetInfo struct { const NETWORK_TYPE_OVERLAY = "overlay" +func newHostNetworkService() (HostNetworkService, hcn.SupportedFeatures) { + var hns HostNetworkService + hns = hnsV1{} + supportedFeatures := hcn.GetSupportedFeatures() + if supportedFeatures.Api.V2 { + hns = hnsV2{} + } + + return hns, supportedFeatures +} + +func getNetworkName(hnsNetworkName string) (string, error) { + if len(hnsNetworkName) == 0 { + klog.V(3).InfoS("network-name flag not set. Checking environment variable") + hnsNetworkName = os.Getenv("KUBE_NETWORK") + if len(hnsNetworkName) == 0 { + return "", fmt.Errorf("Environment variable KUBE_NETWORK and network-flag not initialized") + } + } + return hnsNetworkName, nil +} + +func getNetworkInfo(hns HostNetworkService, hnsNetworkName string) (*hnsNetworkInfo, error) { + hnsNetworkInfo, err := hns.getNetworkByName(hnsNetworkName) + for err != nil { + klog.ErrorS(err, "Unable to find HNS Network specified. 
Please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName) + time.Sleep(1 * time.Second) + hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName) + } + return hnsNetworkInfo, err +} + +func isOverlay(hnsNetworkInfo *hnsNetworkInfo) bool { + return strings.EqualFold(hnsNetworkInfo.networkType, NETWORK_TYPE_OVERLAY) +} + +// StackCompatTester tests whether the required kernel and network are dualstack capable +type StackCompatTester interface { + DualStackCompatible(networkName string) bool +} + +type DualStackCompatTester struct{} + +func (t DualStackCompatTester) DualStackCompatible(networkName string) bool { + dualStackFeatureEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) + if !dualStackFeatureEnabled { + return false + } + + globals, err := hcn.GetGlobals() + if err != nil { + klog.ErrorS(err, "Unable to determine networking stack version. Falling back to single-stack") + return false + } + + if !kernelSupportsDualstack(globals.Version) { + klog.InfoS("This version of Windows does not support dual-stack. Falling back to single-stack") + return false + } + + // check if network is using overlay + hns, _ := newHostNetworkService() + networkName, err = getNetworkName(networkName) + if err != nil { + klog.ErrorS(err, "unable to determine dual-stack status %v. Falling back to single-stack") + return false + } + networkInfo, err := getNetworkInfo(hns, networkName) + if err != nil { + klog.ErrorS(err, "unable to determine dual-stack status %v. Falling back to single-stack") + return false + } + + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) && isOverlay(networkInfo) { + // Overlay (VXLAN) networks on Windows do not support dual-stack networking today + klog.InfoS("Winoverlay does not support dual-stack. Falling back to single-stack") + return false + } + + return true +} + +// The hcsshim version logic has a bug that did not calculate the versioning of DualStack correctly. 
+// DualStack is supported in WS 2004+ (10.0.19041+) where HCN component version is 11.10+ +// https://github.com/microsoft/hcsshim/pull/1003#issuecomment-827930358 +func kernelSupportsDualstack(currentVersion hcn.Version) bool { + hnsVersion := fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor) + v, err := version.ParseSemantic(hnsVersion) + if err != nil { + return false + } + + return v.AtLeast(version.MustParseSemantic("11.10.0")) +} + func Log(v interface{}, message string, level klog.Level) { klog.V(level).InfoS("%s", message, "spewConfig", spewSdump(v)) } @@ -354,7 +451,7 @@ func newSourceVIP(hns HostNetworkService, network string, ip string, mac string, func (ep *endpointsInfo) Cleanup() { Log(ep, "Endpoint Cleanup", 3) - if ep.refCount != nil { + if !ep.GetIsLocal() && ep.refCount != nil { *ep.refCount-- // Remove the remote hns endpoint, if no service is referring it @@ -543,36 +640,24 @@ func NewProxier( } serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder) - var hns HostNetworkService - hns = hnsV1{} - supportedFeatures := hcn.GetSupportedFeatures() - if supportedFeatures.Api.V2 { - hns = hnsV2{} - } - - hnsNetworkName := config.NetworkName - if len(hnsNetworkName) == 0 { - klog.V(3).InfoS("network-name flag not set. Checking environment variable") - hnsNetworkName = os.Getenv("KUBE_NETWORK") - if len(hnsNetworkName) == 0 { - return nil, fmt.Errorf("Environment variable KUBE_NETWORK and network-flag not initialized") - } + hns, supportedFeatures := newHostNetworkService() + hnsNetworkName, err := getNetworkName(config.NetworkName) + if err != nil { + return nil, err } klog.V(3).InfoS("Cleaning up old HNS policy lists") deleteAllHnsLoadBalancerPolicy() // Get HNS network information - hnsNetworkInfo, err := hns.getNetworkByName(hnsNetworkName) - for err != nil { - klog.ErrorS(err, "Unable to find HNS Network specified. 
Please check network name and CNI deployment", "hnsNetworkName", hnsNetworkName) - time.Sleep(1 * time.Second) - hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName) + hnsNetworkInfo, err := getNetworkInfo(hns, hnsNetworkName) + if err != nil { + return nil, err } // Network could have been detected before Remote Subnet Routes are applied or ManagementIP is updated // Sleep and update the network to include new information - if strings.EqualFold(hnsNetworkInfo.networkType, NETWORK_TYPE_OVERLAY) { + if isOverlay(hnsNetworkInfo) { time.Sleep(10 * time.Second) hnsNetworkInfo, err = hns.getNetworkByName(hnsNetworkName) if err != nil { @@ -592,7 +677,7 @@ func NewProxier( var sourceVip string var hostMac string - if strings.EqualFold(hnsNetworkInfo.networkType, NETWORK_TYPE_OVERLAY) { + if isOverlay(hnsNetworkInfo) { if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.WinOverlay) { return nil, fmt.Errorf("WinOverlay feature gate not enabled") } @@ -605,6 +690,15 @@ func NewProxier( return nil, fmt.Errorf("source-vip flag not set") } + if nodeIP.IsUnspecified() { + // attempt to get the correct ip address + klog.V(2).InfoS("node ip was unspecified. Attempting to find node ip") + nodeIP, err = apiutil.ResolveBindAddress(nodeIP) + if err != nil { + klog.InfoS("failed to find an ip. 
You may need set the --bind-address flag", "err", err) + } + } + interfaces, _ := net.Interfaces() //TODO create interfaces for _, inter := range interfaces { addresses, _ := inter.Addrs() @@ -1157,10 +1251,10 @@ func (proxier *Proxier) syncProxyRules() { } else { // We only share the refCounts for remote endpoints ep.refCount = proxier.endPointsRefCount.getRefCount(newHnsEndpoint.hnsID) + *ep.refCount++ } ep.hnsID = newHnsEndpoint.hnsID - *ep.refCount++ Log(ep, "Endpoint resource found", 3) } @@ -1265,7 +1359,7 @@ func (proxier *Proxier) syncProxyRules() { } hnsLoadBalancer, err := hns.getLoadBalancer( lbIngressEndpoints, - loadBalancerFlags{isDSR: svcInfo.preserveDIP || proxier.isDSR || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, + loadBalancerFlags{isDSR: svcInfo.preserveDIP || svcInfo.localTrafficDSR, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP, sessionAffinity: sessionAffinityClientIP, isIPv6: proxier.isIPv6Mode}, sourceVip, lbIngressIP.ip, Enum(svcInfo.Protocol()), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/rest.go index 95ea56b54bf4..f13489a0b22b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/rest.go @@ -744,6 +744,12 @@ func (rs *REST) handleClusterIPsForUpdatedService(oldService *api.Service, servi } // CASE B: + + // if headless service then we bail out early (no clusterIPs management needed) + if len(oldService.Spec.ClusterIPs) > 0 && oldService.Spec.ClusterIPs[0] == api.ClusterIPNone { + return nil, nil, nil + } + // Update service from non-ExternalName to ExternalName, should release ClusterIP if exists. 
if oldService.Spec.Type != api.ServiceTypeExternalName && service.Spec.Type == api.ServiceTypeExternalName { toRelease = make(map[api.IPFamily]string) @@ -760,11 +766,6 @@ func (rs *REST) handleClusterIPsForUpdatedService(oldService *api.Service, servi return nil, toRelease, nil } - // if headless service then we bail out early (no clusterIPs management needed) - if len(oldService.Spec.ClusterIPs) > 0 && oldService.Spec.ClusterIPs[0] == api.ClusterIPNone { - return nil, nil, nil - } - // upgrade and downgrade are specific to dualstack if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) { return nil, nil, nil diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go index db60cc2290f6..1f1fa0771101 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go @@ -119,6 +119,7 @@ func (strategy svcStrategy) PrepareForUpdate(ctx context.Context, obj, old runti oldService := old.(*api.Service) newService.Status = oldService.Status + patchAllocatedValues(newService, oldService) NormalizeClusterIPs(oldService, newService) dropServiceDisabledFields(newService, oldService) dropTypeDependentFields(newService, oldService) @@ -302,6 +303,68 @@ func (serviceStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtim return validation.ValidateServiceStatusUpdate(obj.(*api.Service), old.(*api.Service)) } +// WarningsOnUpdate returns warnings for the given update. +func (serviceStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { + return nil +} + +// patchAllocatedValues allows clients to avoid a read-modify-write cycle while +// preserving values that we allocated on their behalf. For example, they +// might create a Service without specifying the ClusterIP, in which case we +// allocate one. If they resubmit that same YAML, we want it to succeed. 
+func patchAllocatedValues(newSvc, oldSvc *api.Service) { + if needsClusterIP(oldSvc) && needsClusterIP(newSvc) { + if newSvc.Spec.ClusterIP == "" { + newSvc.Spec.ClusterIP = oldSvc.Spec.ClusterIP + } + if len(newSvc.Spec.ClusterIPs) == 0 { + newSvc.Spec.ClusterIPs = oldSvc.Spec.ClusterIPs + } + } + + if needsNodePort(oldSvc) && needsNodePort(newSvc) { + nodePortsUsed := func(svc *api.Service) sets.Int32 { + used := sets.NewInt32() + for _, p := range svc.Spec.Ports { + if p.NodePort != 0 { + used.Insert(p.NodePort) + } + } + return used + } + + // Build a set of all the ports in oldSvc that are also in newSvc. We know + // we can't patch these values. + used := nodePortsUsed(oldSvc).Intersection(nodePortsUsed(newSvc)) + + // Map NodePorts by name. The user may have changed other properties + // of the port, but we won't see that here. + np := map[string]int32{} + for i := range oldSvc.Spec.Ports { + p := &oldSvc.Spec.Ports[i] + np[p.Name] = p.NodePort + } + + // If newSvc is missing values, try to patch them in when we know them and + // they haven't been used for another port. + for i := range newSvc.Spec.Ports { + p := &newSvc.Spec.Ports[i] + if p.NodePort == 0 { + oldVal := np[p.Name] + if !used.Has(oldVal) { + p.NodePort = oldVal + } + } + } + } + + if needsHCNodePort(oldSvc) && needsHCNodePort(newSvc) { + if newSvc.Spec.HealthCheckNodePort == 0 { + newSvc.Spec.HealthCheckNodePort = oldSvc.Spec.HealthCheckNodePort + } + } +} + // NormalizeClusterIPs adjust clusterIPs based on ClusterIP. This must not // consider any other fields. 
func NormalizeClusterIPs(oldSvc, newSvc *api.Service) { diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go index 7a7929dfd920..429d3c3805e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/most_allocated.go @@ -110,7 +110,9 @@ func mostRequestedScore(requested, capacity int64) int64 { return 0 } if requested > capacity { - return 0 + // `requested` might be greater than `capacity` because pods with no + // requests get minimum values. + requested = capacity } return (requested * framework.MaxNodeScore) / capacity diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go index 6836a2150b46..7f8c8dde3d66 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go @@ -90,15 +90,19 @@ func MoreImportantPod(pod1, pod2 *v1.Pod) bool { return GetPodStartTime(pod1).Before(GetPodStartTime(pod2)) } -// PatchPod calculates the delta bytes change from to , +// PatchPodStatus calculates the delta bytes change from to , // and then submit a request to API server to patch the pod changes. 
-func PatchPod(cs kubernetes.Interface, old *v1.Pod, new *v1.Pod) error { - oldData, err := json.Marshal(old) +func PatchPodStatus(cs kubernetes.Interface, old *v1.Pod, newStatus *v1.PodStatus) error { + if newStatus == nil { + return nil + } + + oldData, err := json.Marshal(v1.Pod{Status: old.Status}) if err != nil { return err } - newData, err := json.Marshal(new) + newData, err := json.Marshal(v1.Pod{Status: *newStatus}) if err != nil { return err } @@ -128,9 +132,9 @@ func ClearNominatedNodeName(cs kubernetes.Interface, pods ...*v1.Pod) utilerrors if len(p.Status.NominatedNodeName) == 0 { continue } - podCopy := p.DeepCopy() - podCopy.Status.NominatedNodeName = "" - if err := PatchPod(cs, p, podCopy); err != nil { + podStatusCopy := p.Status.DeepCopy() + podStatusCopy.NominatedNodeName = "" + if err := PatchPodStatus(cs, p, podStatusCopy); err != nil { errs = append(errs, err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/util/removeall/OWNERS b/vendor/k8s.io/kubernetes/pkg/util/removeall/OWNERS new file mode 100644 index 000000000000..6b5f6ba5041c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/removeall/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-storage-approvers +reviewers: +- sig-storage-reviewers +labels: +- sig/storage diff --git a/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go b/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go index 1d268e18ee64..7801ecffc987 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go +++ b/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall.go @@ -25,16 +25,16 @@ import ( "k8s.io/mount-utils" ) -// RemoveAllOneFilesystem removes path and any children it contains. -// It removes everything it can but returns the first error -// it encounters. If the path does not exist, RemoveAll +// RemoveAllOneFilesystemCommon removes the path and any children it contains, +// using the provided remove function. 
It removes everything it can but returns +// the first error it encounters. If the path does not exist, RemoveAll // returns nil (no error). // It makes sure it does not cross mount boundary, i.e. it does *not* remove // files from another filesystems. Like 'rm -rf --one-file-system'. // It is copied from RemoveAll() sources, with IsLikelyNotMountPoint -func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { +func RemoveAllOneFilesystemCommon(mounter mount.Interface, path string, remove func(string) error) error { // Simple case: if Remove works, we're done. - err := os.Remove(path) + err := remove(path) if err == nil || os.IsNotExist(err) { return nil } @@ -48,7 +48,7 @@ func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { return serr } if !dir.IsDir() { - // Not a directory; return the error from Remove. + // Not a directory; return the error from remove. return err } @@ -76,7 +76,7 @@ func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { for { names, err1 := fd.Readdirnames(100) for _, name := range names { - err1 := RemoveAllOneFilesystem(mounter, path+string(os.PathSeparator)+name) + err1 := RemoveAllOneFilesystemCommon(mounter, path+string(os.PathSeparator)+name, remove) if err == nil { err = err1 } @@ -97,7 +97,7 @@ func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { fd.Close() // Remove directory. - err1 := os.Remove(path) + err1 := remove(path) if err1 == nil || os.IsNotExist(err1) { return nil } @@ -106,3 +106,23 @@ func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { } return err } + +// RemoveAllOneFilesystem removes the path and any children it contains, using +// the os.Remove function. It makes sure it does not cross mount boundaries, +// i.e. it returns an error rather than remove files from another filesystem. +// It removes everything it can but returns the first error it encounters. +// If the path does not exist, it returns nil (no error). 
+func RemoveAllOneFilesystem(mounter mount.Interface, path string) error { + return RemoveAllOneFilesystemCommon(mounter, path, os.Remove) +} + +// RemoveDirsOneFilesystem removes the path and any empty subdirectories it +// contains, using the syscall.Rmdir function. Unlike RemoveAllOneFilesystem, +// RemoveDirsOneFilesystem will remove only directories and returns an error if +// it encounters any files in the directory tree. It makes sure it does not +// cross mount boundaries, i.e. it returns an error rather than remove dirs +// from another filesystem. It removes everything it can but returns the first +// error it encounters. If the path does not exist, it returns nil (no error). +func RemoveDirsOneFilesystem(mounter mount.Interface, path string) error { + return RemoveAllOneFilesystemCommon(mounter, path, syscall.Rmdir) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go index d632ffd1433b..6d2f056cb48a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -262,7 +262,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error c.supportsSELinux, err = c.kubeVolHost.GetHostUtil().GetSELinuxSupport(dir) if err != nil { - klog.V(2).Info(log("error checking for SELinux support: %s", err)) + // The volume is mounted. Return UncertainProgressError, so kubelet will unmount it when user deletes the pod. 
+ return volumetypes.NewUncertainProgressError(fmt.Sprintf("error checking for SELinux support: %s", err)) } if c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go index cc5fe628af65..36b72e5e8ecf 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go @@ -108,7 +108,7 @@ func (hu *FakeHostUtil) GetOwner(pathname string) (int64, int64, error) { // GetSELinuxSupport tests if pathname is on a mount that supports SELinux. // Not implemented for testing func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) { - return false, errors.New("GetSELinuxSupport not implemented") + return false, nil } // GetMode returns permissions of pathname. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go index 160d22c28ad8..ab1c523358bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go @@ -28,6 +28,7 @@ import ( "golang.org/x/sys/unix" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/util/selinux" "k8s.io/mount-utils" utilpath "k8s.io/utils/path" ) @@ -229,8 +230,16 @@ func DoMakeRShared(path string, mountInfoFilename string) error { return nil } +// selinux.SELinuxEnabled implementation for unit tests +type seLinuxEnabledFunc func() bool + // GetSELinux is common implementation of GetSELinuxSupport on Linux. -func GetSELinux(path string, mountInfoFilename string) (bool, error) { +func GetSELinux(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (bool, error) { + // Skip /proc/mounts parsing if SELinux is disabled. 
+ if !selinuxEnabled() { + return false, nil + } + info, err := findMountInfo(path, mountInfoFilename) if err != nil { return false, err @@ -253,7 +262,7 @@ func GetSELinux(path string, mountInfoFilename string) (bool, error) { // GetSELinuxSupport returns true if given path is on a mount that supports // SELinux. func (hu *HostUtil) GetSELinuxSupport(pathname string) (bool, error) { - return GetSELinux(pathname, procMountInfoPath) + return GetSELinux(pathname, procMountInfoPath, selinux.SELinuxEnabled) } // GetOwner returns the integer ID for the user and group of the given path diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go index 1140f75ce5d6..84cdf5e10514 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go @@ -209,8 +209,9 @@ func doBindSubPath(mounter mount.Interface, subpath Subpath) (hostPath string, e // Do the bind mount options := []string{"bind"} + mountFlags := []string{"--no-canonicalize"} klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) - if err = mounter.MountSensitiveWithoutSystemd(mountSource, bindPathTarget, "" /*fstype*/, options, nil); err != nil { + if err = mounter.MountSensitiveWithoutSystemdWithMountFlags(mountSource, bindPathTarget, "" /*fstype*/, options, nil /* sensitiveOptions */, mountFlags); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go index 30db7bdb463f..5941ac795eae 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go @@ -259,7 +259,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo 
func gatherMetrics(f *framework.Framework) { ginkgo.By("Gathering metrics") var summary framework.TestDataSummary - grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false) + grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), false, false, true, false, false) if err != nil { framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.") } else { diff --git a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go index 6380cc0f138f..d6ba011775e5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go +++ b/vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go @@ -41,14 +41,16 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { ginkgo.BeforeEach(func() { ns = f.Namespace.Name - nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns) - framework.ExpectNotEqual(len(nodeList.Items), 0) - - pickedNode := nodeList.Items[0] - nodeIPs = e2enode.GetAddresses(&pickedNode, v1.NodeExternalIP) - // The pods running in the cluster can see the internal addresses. - nodeIPs = append(nodeIPs, e2enode.GetAddresses(&pickedNode, v1.NodeInternalIP)...) 
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, 1) + framework.ExpectNoError(err) + + family := v1.IPv4Protocol + if framework.TestContext.ClusterIsIPv6() { + family = v1.IPv6Protocol + } + + nodeIPs := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family) + framework.ExpectNotEqual(len(nodeIPs), 0) // make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works saName := "default" diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go index 0194152e6002..8a88134c86e2 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go @@ -296,7 +296,7 @@ func (f *Framework) BeforeEach() { gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master" if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics { - grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics) + grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics) if err != nil { Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err) } else { @@ -449,7 +449,7 @@ func (f *Framework) AfterEach() { ginkgo.By("Gathering metrics") // Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics. 
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark") - grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics) + grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics) if err != nil { Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err) } else { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go index 2c1810204efc..1d88794107de 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go @@ -139,7 +139,7 @@ func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletM if c == nil { return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics") } - grabber, err := NewMetricsGrabber(c, nil, true, false, false, false, false) + grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false) if err != nil { return KubeletMetrics{}, err } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go index 1d341cc9360a..026015084103 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go @@ -18,31 +18,37 @@ package metrics import ( "context" + "errors" "fmt" + "net" "regexp" "sync" "time" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - e2epod 
"k8s.io/kubernetes/test/e2e/framework/pod" - + "k8s.io/client-go/rest" "k8s.io/klog/v2" + + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) const ( - // insecureSchedulerPort is the default port for the scheduler status server. - // May be overridden by a flag at startup. - // Deprecated: use the secure KubeSchedulerPort instead. - insecureSchedulerPort = 10251 - // insecureKubeControllerManagerPort is the default port for the controller manager status server. - // May be overridden by a flag at startup. - // Deprecated: use the secure KubeControllerManagerPort instead. - insecureKubeControllerManagerPort = 10252 + // kubeSchedulerPort is the default port for the scheduler status server. + kubeSchedulerPort = 10259 + // kubeControllerManagerPort is the default port for the controller manager status server. + kubeControllerManagerPort = 10257 ) +// MetricsGrabbingDisabledError is an error that is wrapped by the +// different MetricsGrabber.Wrap functions when metrics grabbing is +// not supported. Tests that check metrics data should then skip +// the check. +var MetricsGrabbingDisabledError = errors.New("metrics grabbing disabled") + // Collection is metrics collection of components type Collection struct { APIServerMetrics APIServerMetrics @@ -56,18 +62,27 @@ type Collection struct { type Grabber struct { client clientset.Interface externalClient clientset.Interface + config *rest.Config grabFromAPIServer bool grabFromControllerManager bool grabFromKubelets bool grabFromScheduler bool grabFromClusterAutoscaler bool kubeScheduler string + waitForSchedulerReadyOnce sync.Once kubeControllerManager string waitForControllerManagerReadyOnce sync.Once } -// NewMetricsGrabber returns new metrics which are initialized. 
-func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) { +// NewMetricsGrabber prepares for grabbing metrics data from several different +// components. It should be called when those components are running because +// it needs to communicate with them to determine for which components +// metrics data can be retrieved. +// +// Collecting metrics data is an optional debug feature. Not all clusters will +// support it. If disabled for a component, the corresponding Grab function +// will immediately return an error derived from MetricsGrabbingDisabledError. +func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) { kubeScheduler := "" kubeControllerManager := "" @@ -75,6 +90,10 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b regKubeScheduler := regexp.MustCompile("kube-scheduler-.*") regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*") + if (scheduler || controllers) && config == nil { + return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics") + } + podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, err @@ -93,31 +112,46 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b break } } - if kubeScheduler == "" { - scheduler = false - klog.Warningf("Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.") - } - if kubeControllerManager == "" { - controllers = false - klog.Warningf("Can't find kube-controller-manager pod. 
Grabbing metrics from kube-controller-manager is disabled.") - } - if ec == nil { + if clusterAutoscaler && ec == nil { klog.Warningf("Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.") } return &Grabber{ client: c, externalClient: ec, + config: config, grabFromAPIServer: apiServer, - grabFromControllerManager: controllers, + grabFromControllerManager: checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager), grabFromKubelets: kubelets, - grabFromScheduler: scheduler, + grabFromScheduler: checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler), grabFromClusterAutoscaler: clusterAutoscaler, kubeScheduler: kubeScheduler, kubeControllerManager: kubeControllerManager, }, nil } +func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool { + if !requested { + return false + } + if podName == "" { + klog.Warningf("Can't find %s pod. Grabbing metrics from %s is disabled.", component, component) + return false + } + + // The debug handlers on the host where the pod runs might be disabled. + // We can check that indirectly by trying to retrieve log output. + limit := int64(1) + if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil { + klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. Grabbing metrics from %s is disabled.", + podName, err, component) + return false + } + + // Metrics gathering enabled. 
+ return true +} + // HasControlPlanePods returns true if metrics grabber was able to find control-plane pods func (g *Grabber) HasControlPlanePods() bool { return g.kubeScheduler != "" && g.kubeControllerManager != "" @@ -149,20 +183,38 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub // GrabFromScheduler returns metrics from scheduler func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) { - if g.kubeScheduler == "" { - return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering") + if !g.grabFromScheduler { + return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError) } - output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort) + + var err error + + g.waitForSchedulerReadyOnce.Do(func() { + if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { + err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr) + } + }) if err != nil { return SchedulerMetrics{}, err } + + var lastMetricsFetchErr error + var output string + if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { + output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort) + return lastMetricsFetchErr == nil, nil + }); metricsWaitErr != nil { + err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) + return SchedulerMetrics{}, err + } + return parseSchedulerMetrics(output) } // GrabFromClusterAutoscaler returns metrics from cluster autoscaler func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) { if !g.HasControlPlanePods() && g.externalClient == nil { - return ClusterAutoscalerMetrics{}, fmt.Errorf("Did not find control-plane pods. 
Skipping ClusterAutoscaler's metrics gathering") + return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError) } var client clientset.Interface var namespace string @@ -182,35 +234,31 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) // GrabFromControllerManager returns metrics from controller manager func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) { - if g.kubeControllerManager == "" { - return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager pod is not registered. Skipping ControllerManager's metrics gathering") + if !g.grabFromControllerManager { + return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError) } var err error - podName := g.kubeControllerManager - g.waitForControllerManagerReadyOnce.Do(func() { - if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil { - err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr) - return - } - var lastMetricsFetchErr error - if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - _, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort) - return lastMetricsFetchErr == nil, nil - }); metricsWaitErr != nil { - err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) - return + g.waitForControllerManagerReadyOnce.Do(func() { + if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil { + err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr) } }) if err != nil { return ControllerManagerMetrics{}, err } - output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, 
insecureKubeControllerManagerPort) - if err != nil { + var output string + var lastMetricsFetchErr error + if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { + output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort) + return lastMetricsFetchErr == nil, nil + }); metricsWaitErr != nil { + err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr) return ControllerManagerMetrics{}, err } + return parseControllerManagerMetrics(output) } @@ -281,12 +329,13 @@ func (g *Grabber) Grab() (Collection, error) { return result, nil } +// getMetricsFromPod retrieves metrics data from an insecure port. func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) { rawOutput, err := client.CoreV1().RESTClient().Get(). Namespace(namespace). Resource("pods"). SubResource("proxy"). - Name(fmt.Sprintf("%v:%v", podName, port)). + Name(fmt.Sprintf("%s:%d", podName, port)). Suffix("metrics"). Do(context.TODO()).Raw() if err != nil { @@ -294,3 +343,50 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, } return string(rawOutput), nil } + +// getSecureMetricsFromPod retrieves metrics from a pod that uses TLS +// and checks client credentials. Conceptually this function is +// similar to "kubectl port-forward" + "kubectl get --raw +// https://localhost:/metrics". It uses the same credentials +// as kubelet. 
+func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) { + dialer := e2epod.NewDialer(g.client, g.config) + metricConfig := rest.CopyConfig(g.config) + addr := e2epod.Addr{ + Namespace: namespace, + PodName: podName, + Port: port, + } + metricConfig.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { + return dialer.DialContainerPort(ctx, addr) + } + // This should make it possible verify the server, but while it + // got past the server name check, certificate validation + // still failed. + metricConfig.Host = addr.String() + metricConfig.ServerName = "localhost" + // Verifying the pod certificate with the same root CA + // as for the API server led to an error about "unknown root + // certificate". Disabling certificate checking on the client + // side gets around that and should be good enough for + // E2E testing. + metricConfig.Insecure = true + metricConfig.CAFile = "" + metricConfig.CAData = nil + + // clientset.NewForConfig is used because + // metricClient.RESTClient() is directly usable, in contrast + // to the client constructed by rest.RESTClientFor(). + metricClient, err := clientset.NewForConfig(metricConfig) + if err != nil { + return "", err + } + + rawOutput, err := metricClient.RESTClient().Get(). + AbsPath("metrics"). + Do(context.TODO()).Raw() + if err != nil { + return "", err + } + return string(rawOutput), nil +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go new file mode 100644 index 000000000000..d0ae2880acfe --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go @@ -0,0 +1,215 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "regexp" + "strconv" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + "k8s.io/klog/v2" +) + +// NewTransport creates a transport which uses the port forward dialer. +// URLs must use .: as host. +func NewTransport(client kubernetes.Interface, restConfig *rest.Config) *http.Transport { + return &http.Transport{ + DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) { + dialer := NewDialer(client, restConfig) + a, err := ParseAddr(addr) + if err != nil { + return nil, err + } + return dialer.DialContainerPort(ctx, *a) + }, + } +} + +// NewDialer creates a dialer that supports connecting to container ports. +func NewDialer(client kubernetes.Interface, restConfig *rest.Config) *Dialer { + return &Dialer{ + client: client, + restConfig: restConfig, + } +} + +// Dialer holds the relevant parameters that are independent of a particular connection. +type Dialer struct { + client kubernetes.Interface + restConfig *rest.Config +} + +// DialContainerPort connects to a certain container port in a pod. 
+func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Conn, finalErr error) { + restClient := d.client.CoreV1().RESTClient() + restConfig := d.restConfig + if restConfig.GroupVersion == nil { + restConfig.GroupVersion = &schema.GroupVersion{} + } + if restConfig.NegotiatedSerializer == nil { + restConfig.NegotiatedSerializer = scheme.Codecs + } + + // The setup code around the actual portforward is from + // https://github.com/kubernetes/kubernetes/blob/c652ffbe4a29143623a1aaec39f745575f7e43ad/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go + req := restClient.Post(). + Resource("pods"). + Namespace(addr.Namespace). + Name(addr.PodName). + SubResource("portforward") + transport, upgrader, err := spdy.RoundTripperFor(restConfig) + if err != nil { + return nil, fmt.Errorf("create round tripper: %v", err) + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) + + streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) + if err != nil { + return nil, fmt.Errorf("dialer failed: %v", err) + } + requestID := "1" + defer func() { + if finalErr != nil { + streamConn.Close() + } + }() + + // create error stream + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + headers.Set(v1.PortHeader, fmt.Sprintf("%d", addr.Port)) + headers.Set(v1.PortForwardRequestIDHeader, requestID) + + // We're not writing to this stream, just reading an error message from it. + // This happens asynchronously. 
+ errorStream, err := streamConn.CreateStream(headers) + if err != nil { + return nil, fmt.Errorf("error creating error stream: %v", err) + } + errorStream.Close() + go func() { + message, err := ioutil.ReadAll(errorStream) + switch { + case err != nil: + klog.ErrorS(err, "error reading from error stream") + case len(message) > 0: + klog.ErrorS(errors.New(string(message)), "an error occurred connecting to the remote port") + } + }() + + // create data stream + headers.Set(v1.StreamType, v1.StreamTypeData) + dataStream, err := streamConn.CreateStream(headers) + if err != nil { + return nil, fmt.Errorf("error creating data stream: %v", err) + } + + return &stream{ + Stream: dataStream, + streamConn: streamConn, + }, nil +} + +// Addr contains all relevant parameters for a certain port in a pod. +// The container should be running before connections are attempted, +// otherwise the connection will fail. +type Addr struct { + Namespace, PodName string + Port int +} + +var _ net.Addr = Addr{} + +func (a Addr) Network() string { + return "port-forwarding" +} + +func (a Addr) String() string { + return fmt.Sprintf("%s.%s:%d", a.Namespace, a.PodName, a.Port) +} + +// ParseAddr expects a .: as produced +// by Addr.String. 
+func ParseAddr(addr string) (*Addr, error) { + parts := addrRegex.FindStringSubmatch(addr) + if parts == nil { + return nil, fmt.Errorf("%q: must match the format .:", addr) + } + port, _ := strconv.Atoi(parts[3]) + return &Addr{ + Namespace: parts[1], + PodName: parts[2], + Port: port, + }, nil +} + +var addrRegex = regexp.MustCompile(`^([^\.]+)\.([^:]+):(\d+)$`) + +type stream struct { + addr Addr + httpstream.Stream + streamConn httpstream.Connection +} + +var _ net.Conn = &stream{} + +func (s *stream) Close() error { + s.Stream.Close() + s.streamConn.Close() + return nil +} + +func (s *stream) LocalAddr() net.Addr { + return LocalAddr{} +} + +func (s *stream) RemoteAddr() net.Addr { + return s.addr +} + +func (s *stream) SetDeadline(t time.Time) error { + return nil +} + +func (s *stream) SetReadDeadline(t time.Time) error { + return nil +} + +func (s *stream) SetWriteDeadline(t time.Time) error { + return nil +} + +type LocalAddr struct{} + +var _ net.Addr = LocalAddr{} + +func (l LocalAddr) Network() string { return "port-forwarding" } +func (l LocalAddr) String() string { return "apiserver" } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go b/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go index 35c317a6501d..dd2991f1673e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go @@ -304,6 +304,10 @@ func (j *TestJig) ListNodesWithEndpoint() ([]v1.Node, error) { // GetEndpointNodeNames returns a string set of node names on which the // endpoints of the given Service are running. 
func (j *TestJig) GetEndpointNodeNames() (sets.String, error) { + err := j.waitForAvailableEndpoint(ServiceEndpointsTimeout) + if err != nil { + return nil, err + } endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err) diff --git a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go index 1c1c4dcead3a..2142da526843 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go +++ b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go @@ -94,9 +94,11 @@ // test/e2e/testing-manifests/storage-csi/OWNERS // test/e2e/testing-manifests/storage-csi/controller-role.yaml // test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml +// test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-agent/rbac.yaml +// test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml // test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml // test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml -// test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml +// test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml // test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml // test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml // test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml @@ -106,8 +108,8 @@ // test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml // test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-resizer.yaml // test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml +// test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml // 
test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml -// test/e2e/testing-manifests/storage-csi/hostpath/usage/csi-storageclass.yaml // test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml // test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml // test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml @@ -116,6 +118,7 @@ // test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml // test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml // test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml +// test/e2e/testing-manifests/storage-csi/update-hostpath.sh // test/e2e_node/testing-manifests/sriovdp-cm.yaml // test/e2e_node/testing-manifests/sriovdp-ds.yaml // test/e2e_node/testing-manifests/sriovdp-sa.yaml @@ -1737,7 +1740,7 @@ func testE2eTestingManifestsSampleDevicePluginYaml() (*asset, error) { return a, nil } -var _testE2eTestingManifestsSchedulingNvidiaDriverInstallerYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x54\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x0c\xb2\x67\x59\xc9\xb6\x45\x17\xba\x19\x71\xd0\x1a\xcd\xda\x41\xec\xe4\x6a\xd0\xe4\x58\x22\x44\x91\xea\x70\xa8\x8d\xff\x7d\x41\xc9\xf1\x4a\xde\x38\xf1\x76\x75\xd3\x7c\xbc\xf9\xe0\x7b\xf3\x09\xd6\xa5\xf6\x30\x13\x58\x3b\xbb\x42\x86\x6f\xc2\x83\x23\x5d\x68\x2b\x8c\xd9\x03\xe1\x0e\x09\xad\x44\x05\x3b\x72\x75\xf2\x09\x4a\xe6\xc6\xe7\x59\x56\x68\x2e\xc3\x76\x22\x5d\x9d\xfd\xe5\x5c\x61\xf0\xd6\xb8\xa0\x1e\x8c\xe0\x9d\xa3\x3a\x93\xce\xb2\xd0\x16\x29\x45\x5b\x68\x8b\xa9\x90\x12\x0d\x92\x60\x47\x3e\xdb\x1a\xb7\xcd\x6a\xe1\x19\x29\x53\x5d\x6d\x8f\x3c\xd9\x8b\xda\x24\x49\x6c\x09\x61\xe6\x64\x85\xb4\xd3\x06\x41\x58\x05\x8e\x4b\x24\xf0\x2e\x90\x44\xd8\x39\x02\x8e\x6d\x1f\x53\x41\x10\x82\xb6\x3f\xd3\x9e\x4f\x8b\x26\xa4\xda\x7a\x16\xc6\x20\x25\x89\x68\xf4\x33\x92\xd7\xce\xe6\x20\x9a\xc6\x67\xed\x4d\x52\x69\xab\xf2\xef\xdb\x49\x6a\x64\xa1\x04\x8b\x3c\x01\xb0\xa2\xc6\x1c\x6c\xab\x95\x16\xa9\x22\xdd\x22\x0d\xe0\x7a\xbf\x6f\x84\xc4\x1c\xaa\xb0\xc5\xd4\xef\x3d\x63\x9d\x00\x18\xb1\x45\xe3\x23\x04\x40\xf5\xc5\xa7\xa2\x69\xce\xe3\xf8\x06\x65\x0c\xf5\x68\x50\xb2\xa3\x3e\xad\x16\x2c\xcb\xfb\x01\xce\x05\x48\x00\xa1\x51\x82\x71\xc5\x24\x18\x8b\x7d\x9f\xc8\xfb\x06\x73\x78\x74\xc6\x68\x5b\x3c\x75\x01\x09\x00\x63\xdd\x18\xc1\x78\xa8\x36\x98\x3a\x7e\x66\x54\xf8\xe3\x4d\x5c\xdc\x20\xc0\xeb\xb8\xf1\x13\xbb\x9d\xb6\x9a\xf7\x83\x42\x4e\xe1\xf4\x07\x2b\x00\xe1\xbf\x41\x13\xaa\x59\x20\x6d\x8b\x95\x2c\x51\x85\x38\xcf\xbc\xb0\xee\x68\xbe\x7b\x41\x19\x38\x3e\xef\x20\xb3\xc7\x5c\x1d\x96\xbb\x46\xaa\xfd\xd8\x9d\xf6\xbb\xbe\x7b\x69\x08\x7d\x24\xc7\x89\x3f\x46\x54\xb8\xcf\x41\x46\x7e\x4d\x8a\x8e\x6b\x1d\xed\x8a\x6a\x44\xfa\x93\x2c\x00\xd7\xf4\x8e\x1c\xee\x5e\xb4\x67\x7f\x08\x60\xd7\x25\x0c\x2b\xa5\x83\xd8\xab\x3e\xf8\xea\xe0\x2a\x9d\xe7\x05\xf2\x37\x47\x55\x0e\x4c\x01\x07\xf6\x87\xf9\x6c\x64\x6b\x9d\x09\x35\x0e\x50\xfb\x77\x53\xd8\x1e\x7b\xeb\xd2\x04\x97\xc3\x19\x9b\xf8\x0f\xd9\xf7\xb0\xd7\xc4\x36\x98\x4a\xd8\x54\x4b\x95
\xd6\x2e\x58\xbe\x04\xa5\x74\x35\x66\x51\x0e\x64\x91\xd1\x67\x5b\x6d\xb3\x9e\x0f\x59\x0f\x97\x69\xa9\x26\xea\xa4\xd2\x81\x31\x07\xaa\xa4\x4a\x53\x1a\x6b\xfc\x5a\xc1\x93\x1a\xe4\x1c\x5f\x3e\xc7\xc1\x12\x99\x78\xfb\x7a\xe9\x06\x9b\xd5\xb5\x28\x30\x87\x42\xd2\x44\xbb\xee\xd8\x74\xf4\xf8\xf1\xec\xe4\xed\xe7\xeb\xcf\xd7\xd7\x7f\x5e\xdf\xfc\xa4\x9a\x08\xfb\x63\xe8\x4f\x75\x80\x9e\x4f\x28\x2a\x9b\x90\xc3\xf5\xe4\xe6\x8f\xa3\xd5\xa3\x0c\xa4\x79\x1f\x5b\xc7\x17\x1e\xcd\x47\xba\xd5\x06\x0b\x54\x23\xee\x00\xa0\x6d\x87\x71\xaf\x6b\x5b\x3c\xcf\x67\xf3\xe9\x66\xbe\x58\xad\xa7\xf7\xf7\x9b\xd9\xfc\x71\xf3\xf7\x72\xb5\x1e\x35\xd0\x0a\x13\xf0\x92\xb7\xf8\x00\xf8\x76\xb9\x58\x4f\xe7\x8b\xbb\xc7\x37\xd1\x83\xa7\xcc\x38\x29\xcc\x79\xcc\xe7\xa7\xfb\x7f\xa6\x8b\xcd\xfc\x76\xf6\x7f\x1b\x7d\x8b\xa5\xef\x54\x78\xbf\x63\x64\xf9\x11\xde\xe3\x72\xb9\xde\x7c\x5d\x3e\x2d\xd6\x11\xef\x4d\x94\x48\xdc\xa3\xa3\xd7\xf8\xd7\x48\xe3\x01\x0b\x2e\xd5\x11\x40\x27\x80\x87\x9e\xe3\x67\x17\xfa\xe1\x01\x18\xe3\x9c\x1d\xf3\xad\x0b\x34\x4e\x1d\x7a\xde\x11\xea\x38\x69\xb0\x10\x79\x5e\x9b\x57\xd5\x17\x3f\x39\x08\xb4\x11\xc1\x63\xfe\xdb\xe4\xf7\xc9\xcd\xd5\x89\x0e\x3b\x57\x92\xfc\x17\x00\x00\xff\xff\x76\x9a\x5b\x51\x28\x09\x00\x00") +var _testE2eTestingManifestsSchedulingNvidiaDriverInstallerYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x55\x4b\x6f\xe3\x36\x10\xbe\xeb\x57\x0c\xb2\x97\x16\x85\x24\x3f\x92\x6c\xa2\x53\x83\x38\xd8\x1a\x4d\xec\x20\x76\xf6\x6a\x50\xe4\x48\x26\x4c\x91\x2a\x39\xd2\xc6\xfd\xf5\x05\x25\xc5\x91\x9d\x97\xb7\xeb\x9b\x39\x33\xdf\x3c\xf4\xcd\x37\x5f\x60\xb9\x96\x0e\x26\x0c\x0b\xa3\x17\x48\xf0\x83\x39\x30\x56\xe6\x52\x33\xa5\xb6\x60\x31\x43\x8b\x9a\xa3\x80\xcc\x9a\x22\xf8\x02\x6b\xa2\xd2\x25\x71\x9c\x4b\x5a\x57\x69\xc4\x4d\x11\x7f\x33\x26\x57\x78\xad\x4c\x25\xee\x15\xa3\xcc\xd8\x22\xe6\x46\x13\x93\x1a\x6d\x88\x3a\x97\x1a\x43\xc6\x39\x2a\xb4\x8c\x8c\x75\x71\xaa\x4c\x1a\x17\xcc\x11\xda\x58\x34\xb9\x1d\x52\xb4\x65\x85\x0a\x02\x5f\x12\xc2\xc4\xf0\x0d\xda\x4c\x2a\x04\xa6\x05\x18\x5a\xa3\x05\x67\x2a\xcb\x11\x32\x63\x81\x7c\xd9\xbb\x50\x60\x16\x41\xea\x9f\x29\xcf\x85\x79\x59\x85\x52\x3b\x62\x4a\xa1\x0d\x02\x56\xca\xef\x68\x9d\x34\x3a\x01\x56\x96\x2e\xae\x87\xc1\x46\x6a\x91\xbc\x4c\x27\x28\x90\x98\x60\xc4\x92\x00\x40\xb3\x02\x13\xd0\xb5\x14\x92\x85\xc2\xca\x1a\x6d\x0f\xae\xb5\xbb\x92\x71\x4c\x60\x53\xa5\x18\xba\xad\x23\x2c\x02\x00\xc5\x52\x54\xce\x43\x00\x6c\x2e\x5c\xc8\xca\xf2\x7d\x1c\x57\x22\xf7\xae\x0e\x15\x72\x32\xb6\x0d\x2b\x18\xf1\xf5\x6d\x0f\xe7\x08\x24\x80\xaa\x14\x8c\x70\x41\x96\x11\xe6\xdb\x36\x90\xb6\x25\x26\xf0\x60\x94\x92\x3a\x7f\x6c\x1c\x02\x00\xc2\xa2\x54\x8c\xb0\xcb\xd6\xeb\xda\xff\xd4\x5e\xe2\xcf\x27\x71\x74\x81\x00\xcf\xed\xfa\x1f\xcb\x32\xa9\x25\x6d\x7b\x89\x8c\xc0\xab\x57\xaf\x00\x16\xff\xa9\xa4\x45\x31\xa9\xac\xd4\xf9\x82\xaf\x51\x54\xbe\x9f\x69\xae\xcd\xee\xf9\xe6\x09\x79\x45\xfe\xf3\xf6\x22\x5b\xcc\x45\x37\xdc\x25\xda\xc2\xed\x9b\xc3\x76\xd6\x37\x4f\xa5\x45\xe7\xc9\x71\x60\xf7\x1e\x1b\xdc\x26\xc0\x3d\xbf\xa2\xbc\xe1\x5a\x43\xbb\x7c\xb3\x47\xfa\x83\x28\x00\x53\xb6\x86\x04\x6e\x9e\xa4\x23\xd7\x39\x90\x69\x02\xfa\x99\xc2\x9e\xef\x49\xeb\x7c\xd2\x99\xd6\xc6\xd1\x0c\xe9\x87\xb1\x9b\x04\xc8\x56\xd8\x7b\xbf\x9f\x4e\xf6\xde\x6a\xa3\xaa\x02\x7b\xa8\xed\x77\x13\x58\xef\x6a\x6b\xc2\x18\xad\xfb\x3d\x96\xfe\x3f\xc4\x2f\x6e\xcf
\x81\x75\xa5\x36\x4c\x87\x92\x8b\xb0\x30\x95\xa6\x63\x50\xd6\xa6\xc0\xd8\xaf\x83\xd5\x48\xe8\xe2\x54\xea\xb8\xe5\x43\xdc\xc2\xc5\x92\x8b\x48\x1c\x64\xea\x18\xd3\x51\x25\x14\xd2\x86\x3e\xc7\xaf\x25\x3c\xc8\x61\x8d\xa1\xe3\xfb\xe8\x5e\x3c\x13\xaf\x9f\x95\xae\xc7\x8c\x56\xc3\xae\xe7\x0b\xf8\x76\xff\x08\x3b\x8a\x83\x2c\x58\x8e\x50\xb7\x32\x03\x05\xdb\x42\x8a\x20\xb0\x44\x2d\x50\x13\x18\x0d\xb4\x7e\xb1\x9b\xac\x81\x48\x51\xea\x1c\x2a\x87\x22\xea\x25\x78\xf0\xca\x0c\x64\x40\x20\x31\xa9\x1c\xb0\xd4\x54\xd4\xc4\xf7\xf2\xe9\x9d\x20\x72\xe3\x3a\x76\xb6\x2a\xda\x70\x94\x1b\x17\x93\x31\xca\xc5\x7f\xc4\x16\x33\x17\xaf\x91\x09\xf7\xac\xcc\xce\xf2\x98\x17\xc2\x7b\xad\xf2\xb2\x5a\xed\x70\xe3\x5e\x1d\x5e\x9d\xa9\x6b\xd6\xa2\x42\xe6\x10\xb4\x21\x74\xf0\xdb\x2e\xf5\xe1\x6a\xbc\x1c\x07\x53\x92\x2c\xe4\xbf\x28\x42\xe3\x62\x61\xb8\x8b\x3b\x8c\xb0\xc1\xf8\xbd\x6b\x10\x6d\x21\xf5\xcb\x60\x5e\x0f\xd6\x9f\x04\x06\xb9\xac\x51\x1f\x8c\x2f\x0a\x7a\xc5\xde\xb1\xd2\x79\xc8\x9c\xdb\x48\x9a\xe6\x0a\x34\xc5\xbd\xbe\x07\x49\x3d\x8a\x06\xd1\x18\x42\x70\x95\x24\x96\xaa\xf6\xec\xf8\xcc\x77\x17\x67\xc0\x1c\x94\x68\xe1\xd7\x3b\xfc\xe2\x33\x5f\x9c\x85\xc3\xf1\x78\x38\x08\x87\xa3\xc1\x65\x38\xde\x31\xb3\xa1\x4b\x72\x44\xb5\x7f\xba\x35\x1b\x9d\x9d\x27\x43\x9e\x8d\xbe\x0e\x86\x82\x8f\xf8\xf8\xf2\xf4\x94\x5d\x8e\x33\x31\x38\xe7\xe9\x39\xbf\x44\x14\x19\x4b\xb3\xf1\x68\x7c\x3a\x3a\x3b\xbd\x18\xa7\x6c\xcc\xb2\xd1\xe5\xe9\xd9\x70\x70\x3e\x1a\xa6\xe9\xf8\xab\x18\xe0\x4f\x4a\xba\xed\xb8\xe4\x0e\xc5\x18\x1d\x1d\xe8\x24\x2f\xab\x04\x06\xd1\xf0\x6c\xf7\xea\x90\x57\x56\xd2\xd6\xef\x0f\x3e\xd1\xde\x92\x59\x59\x4b\x85\x39\x8a\x3d\x01\x03\x40\x5d\xf7\xfd\x9e\x77\x77\xf6\x7d\x3a\x99\x5e\xad\xa6\xb3\xc5\xf2\xea\xf6\x76\x35\x99\x3e\xac\xfe\x9a\x2f\x96\x7b\x05\xd4\x4c\x55\x78\x8c\x20\x7c\x02\x7c\x3d\x9f\x2d\xaf\xa6\xb3\x9b\x87\x37\xd1\x2b\x67\x63\x65\x38\x53\xef\x63\x7e\x7f\xbc\xfd\xfb\x6a\xb6\x9a\x5e\x4f\xfe\x6f\xa1\x6f\x49\xe5\x07\x19\x3e\xae\x18\x89\x7f\x86\xf7\x30\x9f\x2f\x57\x77\xf3\xc7\xd9\xd2\xe3\xbd\x89\xe2\xd5\x73\x67
\x68\x0f\xcd\x9d\xd7\xd2\x1e\x0b\x8e\x15\x73\x80\x46\x85\xef\x5b\xa1\x7d\x77\xa0\x9f\x5e\xa1\x7d\x9c\x77\xdb\x7c\xeb\x0c\xee\x87\xf6\x2d\x1f\x5c\x8b\xfd\xa0\xde\x40\xf8\xab\x03\xb1\xdb\xec\x93\xcd\x85\x8b\xba\xf5\x2e\x59\xe5\x30\x19\x47\xa7\xd1\xf0\xe4\x60\x0f\x1b\x53\x10\xfc\x17\x00\x00\xff\xff\x80\x97\xe4\x6a\xad\x0b\x00\x00") func testE2eTestingManifestsSchedulingNvidiaDriverInstallerYamlBytes() ([]byte, error) { return bindataRead( @@ -2357,7 +2360,7 @@ func testE2eTestingManifestsStorageCsiControllerRoleYaml() (*asset, error) { return a, nil } -var _testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x55\xdf\x8b\x1b\x47\x0c\x7e\xdf\xbf\x42\xdc\xbe\xb4\xe0\xb5\xc9\x5b\x71\x9f\x2e\x47\x5b\x02\x2d\x94\x5c\x09\x94\x12\x88\x3c\xa3\xf5\xaa\x9e\x9d\x59\x24\x8d\x9d\xcb\x5f\x5f\x66\xd6\xbe\x1f\x39\xa7\xb9\xe4\x1a\xe8\x93\x07\x59\xab\x4f\xfa\x24\x7d\x6a\xe1\x8f\x81\x15\xfe\xbc\xfc\xed\x57\xe8\x39\x10\xb8\x14\x0d\x39\x2a\x60\x08\xf0\xfa\xe5\xe5\x15\xa4\xcd\xdf\xe4\x4c\xc1\x06\x34\x40\x21\x88\xe4\x48\x15\xe5\x06\x2c\x81\xe4\x08\xf4\xde\x48\x22\x86\xa6\x85\xab\xeb\x57\x80\x66\xe8\x06\x92\x65\xd3\x36\x2d\xbc\x8a\x30\x49\xf2\xd9\x19\xa7\xb8\x00\x42\x37\x54\x2f\x2f\xbc\x27\x01\x4f\x53\x48\x37\x23\x45\x83\x01\xb5\x44\xdc\x10\xb8\xac\x96\x46\xfe\x40\x7e\xdd\xb4\xd0\x15\x2b\xee\x13\xfb\x92\x5d\x1f\xd8\x99\x2e\x20\x2b\x41\x4c\xb1\xf3\xd4\x63\x0e\x06\x11\x47\xd2\x09\x1d\x01\x46\x0f\x9e\xfb\x9e\xa4\x44\xad\xf6\xa6\x05\x80\x3e\x49\xfd\xe2\xd6\xd3\x03\x45\x63\x63\x52\x08\xbc\x23\xb0\x81\xe0\x2a\x64\x35\x92\xd7\x29\x50\x85\xf6\xe4\xd8\x13\x1c\x06\xb2\x81\xa4\xba\xdc\x4b\x59\x68\x0a\xec\xd0\x48\xeb\x3f\x27\x22\x4a\x81\x15\xf2\x44\xc5\x02\x38\xc2\x61\x60\x37\x80\x43\x25\x08\x84\x9e\x44\x07\x9e\x80\x02\x55\x6a\x60\xcc\x6a\xa5\x78\x8a\xb8\x09\xe4\x7f\xac\x01\xac\x74\x87\x63\x1f\x32\x45\x77\x44\xa9\x5d\x51\xb2\x3c\x2d\x40\x89\x60\x43\x21\x1d\x9a\x06\x27\x7e\x43\xa2\x9c\xe2\x1a\xf6\x2f\x9a\x1d\x47\xbf\x86\x6b\x92\x3d\x3b\xba\x74\x2
e\xe5\x68\xcd\x48\x86\x1e\x0d\xd7\x0d\x54\x62\xd6\xe0\x94\xbb\x53\x96\x0d\x40\x5b\x4b\x2a\x2c\x1e\xd8\x86\x4f\x10\x5c\x5e\xc7\x00\xd5\xb0\x86\xa3\x4f\xd3\x74\x5d\xd7\xb4\x70\x79\x0c\x78\x5b\x53\xa9\xa8\x74\xf1\x90\x64\x37\x47\xfe\xfd\x8d\x2e\x20\x26\x4f\x5a\xfb\xf5\x26\x85\x3c\xd2\xfc\x5d\x61\x56\x8f\xf9\xdf\x6f\xc7\xfd\x02\x65\x83\x6e\x89\xd9\x86\x24\xfc\x01\x0b\x7f\xcb\xdd\x0f\xba\xe4\xb4\xda\xbf\x38\x53\xe5\xa9\x31\xb7\xa5\x76\x92\x63\x24\x69\x24\x07\xd2\xe2\xd7\x01\x4e\xfc\x8b\xa4\x3c\xe9\x1a\xfe\xba\xb8\x78\xdb\x00\x00\x08\x69\xca\xe2\xa8\xda\xa6\x02\xae\x46\xd1\xf6\x35\x5b\x3d\x3a\xed\x49\x36\xd5\x61\x4b\x76\xb1\x80\x8b\xc0\x5a\x7f\x0f\x68\x6e\x28\x8f\x3c\x79\x34\x2a\xaf\xa9\x9a\xde\x3e\x0d\xaf\xb2\xf3\x24\x8c\x33\x01\xd5\x92\xe0\x96\x8e\xac\x9c\x0b\xef\x94\xbf\x31\xc2\x4c\x13\xde\x35\xf5\x3f\x24\xec\x2b\xd0\x57\x6a\x68\xf9\x51\x12\x27\x88\xf6\x9a\x9c\x90\xc1\x44\x32\xb2\x96\x31\x03\x56\x48\x53\x19\x2e\x0c\xcb\xa6\xfd\xa9\x2e\x26\xb0\x01\xf7\x70\x93\x32\x44\x22\x0f\x7b\x0c\x99\xa0\x97\x34\x82\xd6\x00\xcb\xa6\xfd\x39\x09\xd0\x7b\x1c\xa7\x40\x8b\xea\x39\xe0\x9e\x60\x47\x37\xf0\xce\x29\x2f\x1f\xe6\xbe\x2a\x92\x2b\x29\x04\x92\x6e\xca\x9b\xc0\x3a\x74\x73\xa4\xaa\x54\xef\x8a\x74\x5c\xcf\x5f\x5c\x05\x54\x5d\x4e\x28\x38\x92\x91\x68\xd3\x96\xfd\x1f\xcc\x26\x5d\xaf\x56\xbb\xbc\x21\x89\x64\xa4\x5d\x41\xd9\xb2\x0d\x79\x53\x00\x7c\x72\xba\x9a\x43\x6a\x87\xd1\x77\x4e\xc8\x17\xe9\xc3\xa0\xcb\xc1\xc6\x22\xdc\x67\x06\xb2\x7d\x44\xe9\x31\xc6\xe9\xbf\xb3\x7d\x7c\x3b\x8b\xc0\xa3\xf5\x7d\xc9\xd1\x73\xdc\x3e\x67\x8b\xef\x6b\x55\x27\x45\x12\x34\xcf\xc7\x69\xde\xe0\xb3\x92\x57\x12\x3d\x2b\x75\x5f\x26\x76\x67\xe5\xae\xe4\xf0\x9a\xfa\x82\xfe\x58\xae\x3e\xab\x3d\x70\xcb\xf8\xbf\x30\xf1\x05\x92\x5a\x8e\x23\x6f\x47\x9c\x14\x92\x94\x13\xa3\x54\x6e\x47\xbd\x19\x2e\xcb\xdd\x35\x2c\x45\x34\x6d\x99\xe2\xef\x8a\xf6\xa6\x18\x6e\x80\xfb\xef\xcf\x5e\x25\xd6\xd3\x41\x3a\xb6\xf4\xeb\xa5\xf8\x99\xb7\xe5\xd3\x7c\xba\x7e\x7b\x12\xf2\x8f\xa6\xd8\xa5\x24\x9e\xe3\xfd\xac\xea\xfa\x3f\x98\xea\x99\xa9\x6a\xf
f\x78\xa2\x6f\x15\xe9\x24\x51\x9e\x02\xcd\xca\x74\xa7\x51\x4e\xa8\xbc\x1e\x0c\xfe\xb7\x9a\xf8\x5a\xeb\xb3\xa9\xfc\xbf\xad\xcd\x67\xf6\x65\xae\xf9\x29\xcb\xf2\x4f\x00\x00\x00\xff\xff\x80\xf5\x0a\x99\xcf\x0a\x00\x00") +var _testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x56\xdf\x8b\x1b\x47\x0c\x7e\xdf\xbf\x42\xdc\xbe\xb4\xe0\xdd\xed\xa5\x50\x8a\xfb\x74\xb9\xfe\x20\xd0\x96\x92\x2b\x81\x52\x02\x91\x67\xb4\x5e\xd5\xb3\x33\xcb\x48\x63\xc7\xf9\xeb\xcb\xcc\xda\x17\x5f\xec\x34\x3f\x8e\x96\x3e\x79\x98\xd5\x48\xfa\x3e\x49\x9f\x5c\xc3\xf7\x01\x7c\x50\x20\xcb\xba\x00\x1b\x76\xde\x05\xb4\x64\xa1\x8f\x61\x84\x41\x75\x92\x65\xd7\xad\x59\x87\xb4\x6a\x4d\x18\xbb\x4d\x5a\x51\xf4\xa4\x24\x8d\x11\xee\xe8\xb5\x52\xf4\xe8\x1a\x54\x45\x33\x50\xec\x22\xee\xba\xed\xd7\xed\x75\xfb\x55\x67\x69\x72\x61\x7f\xf2\xa4\xeb\xe2\x0a\x4d\xbb\xc7\xd1\x55\x35\xf4\x21\x82\x11\x6e\x6c\xe4\x2d\xc5\x66\x08\xa2\xcd\x84\x3a\xc0\xf6\xba\xfd\xa6\x7d\x52\xd5\xb0\xda\x83\x92\x68\x47\x4f\xa8\xcb\x07\xf6\xeb\x66\x44\xcf\x3d\x89\x4a\x27\x1a\x22\xae\xa9\xe4\x91\x26\x8b\x4a\xc5\x47\x76\xd1\xca\x50\xd5\x55\x0d\xbf\x0f\x2c\xf0\xc7\xcd\x2f\x3f\x43\xcf\x8e\xc0\x04\xaf\xc8\x5e\x00\x9d\x83\xe7\x4f\x6f\x6e\x21\xac\xfe\x22\xa3\x02\x3a\xa0\x02\x46\x02\x4f\x86\x44\x30\xee\x41\x03\xc4\xe4\xe1\x88\xb0\xaa\xe1\xf6\xee\x19\x1c\x71\xb6\xc5\xff\x33\x0f\x53\x0c\x36\x19\xe5\xe0\x17\x40\x68\x86\x62\x35\x43\x82\x99\x80\x91\xbc\xc2\x80\x92\x3d\xae\x08\x4c\x12\x0d\x23\xbf\x21\xbb\xac\x6a\x68\xf2\x2d\x6e\x03\xdb\x9c\x5d\xef\xd8\xa8\x2c\x20\x09\x81\x0f\xbe\xb1\xd4\x63\x72\x0a\x1e\x47\x92\x09\x0d\x01\x7a\x0b\x96\xfb\x9e\x62\xf6\x5a\xee\xab\x1a\xa0\x90\x99\x5f\xdc\x5b\x5a\x20\xaf\xac\x4c\x02\x8e\x37\x04\x3a\x10\xdc\xba\x24\x4a\xf1\x79\x70\x54\x42\x5b\x32\x6c\x09\x76\x03\xe9\x40\xb1\x98\x9c\xa4\x1c\x69\x72\x6c\x50\x49\xca\x97\x23\x11\x19\x60\x09\x79\xa4\x62\x01\xec\x61\x37\xb0\x19\xc0\xa0\x10\x38\x42\x4b\x51\x06\x9e\x80\x1c\x15\x6a\x60\x4c\xa2\x19\x3c\x79\x5c\x39\xb2\xdf\x15\x07\x
9a\xab\xc3\xbe\x77\x89\xbc\x39\x44\x29\x55\x11\xd2\x34\x2d\x40\x88\x60\x45\x2e\xec\xaa\x0a\x27\x7e\x41\x51\x38\xf8\x25\x6c\xaf\xab\x0d\x7b\xbb\x84\x3b\x8a\x5b\x36\x74\x63\x4c\x48\x5e\xab\x91\x14\x2d\x2a\x2e\x2b\x28\xc4\x2c\x4b\x77\x1d\xb3\xac\x00\xea\x02\x29\xb3\xb8\x63\x1d\xde\x43\x70\x3e\x1d\x1c\x94\x8b\x25\x1c\x6c\xaa\xaa\x69\x9a\xaa\x86\x9b\x83\xc3\x7b\x4c\x19\x51\xae\xe2\x2e\xc4\xcd\xec\xf9\xb7\x17\xb2\xc8\x34\xfd\x1a\x2c\x49\x29\xd9\x8b\xe0\xd2\x48\xf3\xd3\x4c\xae\x1c\x20\x9c\x56\xe4\x14\x63\x99\x13\x4c\x3a\x84\xc8\x6f\x30\x53\xd8\x6e\xbe\x95\x96\x43\xb7\xbd\xbe\x00\xf4\x6c\x0c\x9b\x98\xbc\xa7\x58\xc5\xe4\x48\xb2\x5d\x03\x38\xf1\x4f\x31\xa4\x49\x96\xf0\xe7\xd5\xd5\xcb\x0a\x00\x20\x92\x84\x14\x0d\x95\xbb\x29\x07\x17\x25\xaf\xdb\x92\xad\x1c\x8c\xb6\x14\x57\xc5\x60\x4d\x7a\xb5\x80\x2b\xc7\x52\x7e\x77\xa8\x66\xc8\x87\xa9\x1c\x5e\x9e\x47\x39\x4c\xe8\x21\xf5\x4b\x31\x8d\xb0\xcf\x24\x7d\x54\xa8\xcf\x8a\x30\x63\xc1\xb7\xcc\xff\x07\xa8\xce\x62\x76\xa2\xa8\xe9\x2c\xf4\x31\x44\x7d\x47\x26\x92\xc2\x44\x71\x64\xc9\x1d\x00\x2c\x10\xa6\x5c\x77\x74\x6d\x55\xff\x50\xc6\x06\x58\x81\x7b\xd8\x87\x04\x9e\xc8\xc2\x16\x5d\xa2\x59\xa9\xa5\x38\x68\xab\xfa\xc7\x10\x81\x5e\xe3\x38\x39\x5a\x14\xcb\x01\xb7\x04\x1b\xda\xc3\x2b\x23\xdc\x3e\xcc\xbd\xcb\x82\x18\x83\x73\x14\x9b\x29\xad\x1c\xcb\xd0\xcc\x9e\x8a\x8e\xbc\xca\x83\x7d\x37\xbf\xb8\x75\x28\xd2\x4e\x18\x71\x24\xa5\x28\x55\x9d\xa7\xf3\xb8\x20\x1e\x6e\x85\xf6\xb0\x2f\x38\x74\x36\x18\xe9\x66\x97\xd2\xa0\xb7\x8d\x89\x64\xb3\x30\xa1\x93\x76\xd0\xb2\x07\x2e\xf4\x66\x7d\x46\xe9\xc1\xc7\xf1\xdb\xc5\xea\xbd\x9c\x47\xf4\x6c\xb2\x9e\xb2\xb7\xec\xd7\x8f\x19\xb0\x53\x25\x69\x62\x9e\x56\x49\xf3\xea\x98\x87\xeb\xa2\x20\xe5\x44\x2f\x0a\xd1\xa7\x49\xd1\x45\x31\xca\x39\x3c\xa7\x3e\x47\x3f\x57\x92\x0f\xca\x02\xdc\x33\xfe\x0f\x4c\x7c\x82\xe0\xe5\xd5\xc5\xeb\x11\x27\x81\x10\xf3\x02\x10\xca\xca\x5e\x14\xdd\xa4\xf8\x76\x57\x65\x10\x55\x9d\xbb\xf8\x8b\x2c\x8b\xc1\xbb\x3d\x70\xff\xe5\xc5\x9d\xc1\x72\x5c\x17\x87\x92\x7e\xbe\x4a\x3e\x52\xf9\xdf\xcf\xa7\xe9\xd7\x
47\x8d\x7d\xa7\x8b\x4d\x08\xd1\xb2\x3f\xcd\xaa\x8c\xff\x83\xae\x9e\x99\x2a\xf7\xef\x76\xf4\xbd\x0e\x1d\x85\xc9\x92\x23\xa5\x7c\x9a\xff\xec\xe4\x93\x89\x94\x4f\x0f\x1a\xff\xdf\xea\xf8\x82\xf5\xd1\x54\xfe\xdf\xc6\xe6\x03\xf3\x32\x63\xfe\x98\x61\xf9\x3b\x00\x00\xff\xff\x5b\x7e\x86\xea\x4b\x0b\x00\x00") func testE2eTestingManifestsStorageCsiExternalAttacherRbacYamlBytes() ([]byte, error) { return bindataRead( @@ -2377,7 +2380,47 @@ func testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml() (*asset, error) return a, nil } -var _testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x56\x41\x6f\xe3\x36\x13\xbd\xeb\x57\x0c\xac\xcb\xf7\x01\x96\x8b\xbd\x15\xea\x29\x1b\x14\xc5\x02\x5b\xb4\xc8\x2e\x0a\x14\xc5\x02\x4b\x93\x23\x6b\x1a\x9a\x54\x39\x43\xb9\xd9\x5f\x5f\x0c\x2d\x3b\x4e\xa2\x24\xde\x4d\x0a\xf4\x26\x90\xd4\xbc\x79\x6f\xa8\xf7\x54\xc3\xc7\x9e\x18\x7e\xbf\xf8\xf9\x3d\x74\xe4\x11\x6c\x0c\x62\x28\x30\x18\xef\xe1\xea\xed\xc5\x25\xc4\xf5\x9f\x68\x85\x41\x7a\x23\x60\x12\x42\x40\x8b\xcc\x26\xdd\x80\x44\x48\x39\x00\xfe\x2d\x98\x82\xf1\x55\x0d\x97\x1f\xde\xc1\x90\xe2\x48\x4c\x31\x60\x5a\x55\x75\x55\xc3\xbb\xa0\x6b\x2e\x5b\xa1\x18\x96\x80\xc6\xf6\xe5\xa0\x4b\x34\x62\x02\x87\x83\x8f\x37\x5b\x0c\x02\xbd\x61\x2d\xba\x46\xb0\x99\x25\x6e\xe9\x0b\xba\xb6\xaa\xa1\xd1\x55\x33\x46\x72\xda\x60\xe7\xc9\x0a\x2f\x21\x33\x42\x88\xa1\x71\xd8\x99\xec\x05\x82\xd9\x22\x0f\xc6\x22\x98\xe0\xc0\x51\xd7\x61\xd2\xaa\x65\xbd\xaa\x01\xa0\x8b\xa9\xbc\x71\x3c\xe9\x00\x83\x90\x10\x32\x78\xba\x46\x90\x1e\xe1\xd2\x67\x16\x4c\x57\xd1\x63\x81\x76\x68\xc9\x21\xec\x7a\x94\x1e\x53\x39\x72\xd2\x72\xc2\xc1\x93\x35\x82\x5c\x76\x0e\x5a\x28\xc1\x02\x79\xa2\xc6\x12\x28\xc0\xae\x27\xdb\x83\x35\x8c\xe0\xd1\x38\x4c\xdc\xd3\x00\xe8\xb1\xa8\x03\xdb\xcc\xa2\xfc\x31\x98\xb5\x47\xf7\x43\xa9\x21\x3a\x23\x0a\x9d\xcf\x18\xec\x04\x54\x66\xc3\x28\x79\x58\x02\x23\xc2\x1a\x7d\xdc\x55\x95\x19\xe8\x37\x4c\x8a\xd7\xc2\xf8\xa6\xba\xa6\xe0\x5a\xf8\x80\x69\x24\x8b\x17\xd6\x
c6\x1c\xa4\xda\xa2\x18\x67\xc4\xb4\x15\x14\x6d\x5a\xb0\x4c\xcd\x49\xa3\x15\x40\x5d\x88\xa9\x96\x3b\x92\xfe\x11\x99\xf5\x69\xaa\x51\x16\x5a\x98\xce\x54\x55\xd3\x34\x13\xf8\xa9\x9c\xa7\xdd\xa5\xb5\xb1\x2b\x93\xa5\x8f\x89\xbe\x18\x25\xbf\xba\xfe\x9e\x57\x14\xbf\x1b\xdf\xcc\xb4\x78\x10\xf6\xb4\xcf\x26\xe5\xa0\xed\xa6\xec\x91\xdb\xd2\xf5\xc7\x1e\xa1\x8b\xde\xc7\x1d\x85\x0d\xe8\x06\x70\x1f\xb3\x77\x2a\x6a\x0e\x36\x6e\x75\x6a\xe8\xca\x55\x18\x7c\xde\xe8\x5d\x2f\x57\x3b\xe1\x5f\x99\x12\x02\xa3\x4d\x28\x5c\xaa\x95\x43\x07\x3c\x0a\x9b\x55\x59\x6d\xc0\x0c\xf4\x53\x8a\x79\xe0\x16\xfe\x58\x2c\x3e\x95\x55\x80\x84\x1c\x73\xb2\x58\x56\xa7\x32\xc7\xcd\x11\xd3\xba\x6c\x6c\x50\x16\x4b\x58\x78\x62\x29\x9b\x73\xd5\xee\xd5\x1a\x54\x33\x16\x0c\x32\x46\x9f\xb7\xc8\xd3\xa1\xd9\x9a\x4b\x58\xec\x8c\xd8\x5e\x1f\x6c\x42\x23\xa8\x4f\x0e\x3d\x0a\x7e\x2b\xa0\xf5\x86\xb6\x67\xa3\xe6\xc1\x99\x79\x2c\x96\x98\xcc\x06\xa7\x41\xcf\x21\x4f\x27\xac\x37\xcc\x67\xf2\x3c\x93\x13\x8e\x18\xe4\x41\xc5\x27\x24\x9b\x68\x2c\x61\x31\x3c\x86\xc3\xc1\x0c\xdc\x47\x59\x3d\x4f\x6c\x9a\xdc\xf4\xc2\x93\xcc\x5e\x13\x48\xfd\x7c\x8e\xf7\x73\x78\xcf\xc2\x58\xa6\x10\xdd\xeb\x8e\xe8\x6b\x0a\xce\x3b\xcc\x5b\x0a\x8e\xc2\xe6\x25\x46\x73\xcf\x0b\x9b\xa4\xc6\xc5\x79\x9f\x82\x6d\x21\x32\xeb\xaa\xda\xf4\x63\x6e\xfa\x75\x7e\x3a\xeb\xa8\xda\xc6\x15\x76\xda\xc0\x43\x5f\x3d\xc7\x24\xe1\xa8\xff\x13\x92\xec\x65\xad\xe1\xd7\xdb\xf7\x8f\x91\xa4\x81\xa4\x39\xbc\x8b\xe9\x7a\xcf\x02\x83\x1b\x22\x05\xd1\x70\x02\x9b\xd3\x6d\xda\x6a\xef\x55\x0d\xd4\xc1\xff\x34\x8b\x63\xf0\x37\x40\xdd\xff\x67\x23\x8f\xf8\x90\x76\xd3\x3c\xbf\x3d\x2a\x5e\x98\x5a\x4f\xca\x68\xbb\xcd\x21\x68\x6a\xf8\x45\x09\xc5\x80\x10\xbb\x92\xc6\x77\x23\x87\x4b\x70\xdc\xaa\x13\x93\x12\x67\x64\xe5\x3a\xe5\x8c\x83\xb5\x61\x54\x69\xaa\x1a\x76\x1a\x40\xc4\x9a\xe7\xe5\xd5\xcf\x4d\xb3\x57\xaa\x39\xa8\xd4\xc8\xcd\x80\x9f\x57\xf0\xe3\xb1\xa8\xfe\x8a\x39\x1c\x12\xea\xcf\x87\xd3\x09\x74\x66\x8c\x49\x3b\x7a\x5f\xc0\x56\xd5\xdc\x27\x77\xd7\x13\x0f\xd5\xca\xd6\xfd\x4f\xee\xe8\x89\x87\x6f\x6f\x0a\x
91\x3b\xde\x38\xf9\xe5\xa7\xfb\x60\x36\xc6\xe4\x28\x9c\x4e\xeb\x21\xfe\x5e\x95\x57\x00\x3f\x71\x83\x7f\xd1\x06\xca\x1d\x78\xf1\x2d\xfb\x0f\x7a\xc9\xf3\x26\xb2\x67\x7e\x8e\x83\xfc\x13\x00\x00\xff\xff\xd9\x60\x5c\x55\x53\x0c\x00\x00") +var _testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x54\xc1\x6e\x1c\x37\x0c\xbd\xeb\x2b\x08\xcf\x75\x67\x26\xf6\xa1\x28\xb6\x27\xc7\x05\xda\x00\x6d\x61\x38\x85\x81\xa2\xe8\x81\x2b\x71\x56\xac\x35\xd2\x40\xa4\x66\xeb\x7c\x7d\x21\xed\xc6\x30\x90\x8d\x9b\x34\x3e\xad\xc0\x25\x1f\xf9\x1e\xf9\xa6\x83\x1f\x13\xc4\xa4\x40\x8e\x75\x03\x2e\x1d\x62\x48\xe8\xc8\xc1\x94\xd3\x0c\x5e\x75\x91\xed\x38\xee\x59\x7d\xd9\x0d\x36\xcd\xe3\x43\xd9\x51\x8e\xa4\x24\xbd\x15\x1e\xe9\x1f\xa5\x1c\x31\xf4\x9e\x30\xa8\xef\xe7\x14\x59\x53\x1e\x33\x1e\xc6\xf5\xcd\x70\x35\xbc\x19\x1d\x2d\x21\x3d\x3e\x2b\xfc\x5c\x51\x8f\x7b\x8a\x3a\xe6\x1d\xda\xe1\x11\xe7\x60\x3a\x98\x52\x06\x2b\xdc\xbb\xcc\x2b\xe5\xde\x27\xd1\x7e\x41\xf5\xb0\x5e\x0e\xdf\x0d\x57\xa6\x83\xdd\x23\x28\x89\x8e\x74\x45\x63\x7d\x70\xdc\xf7\x33\x46\x9e\x48\x54\x46\xd1\x94\x71\x4f\x6d\xd4\xb2\x38\x54\x6a\x18\x15\x62\x10\x6f\x3a\xd3\xc1\xef\x9e\x05\xfe\xb8\xfe\xf5\x17\x98\x38\x10\xd8\x14\x15\x39\x0a\x60\x08\x70\xf7\xf6\xfa\x06\xd2\xee\x6f\xb2\x2a\xa0\x1e\x15\x30\x13\x44\xb2\x24\x82\xf9\x11\x34\x41\x2e\x11\x3e\xf2\x31\x1d\xdc\xbc\x7f\x07\x47\x56\x70\x62\x05\x8d\xd5\xd0\x7a\xbd\x8b\xb0\xe4\xe4\x8a\x55\x4e\x71\x03\x84\xd6\xb7\x8a\x23\x3d\x38\x2a\x35\x53\x54\xf0\x28\x15\x7d\x47\x60\x8b\x68\x9a\xf9\x03\xb9\xad\xe9\xa0\xaf\x51\x5c\x13\xbb\x3a\xe9\x14\xd8\xaa\x6c\xa0\x08\x41\x4c\xb1\x77\x34\x61\x09\x0a\x11\x67\x92\x05\x2d\x01\x46\x07\x8e\xa7\x89\x72\x45\x6d\x71\xd3\x01\x34\x61\x6b\xc5\x53\xa6\x03\x8a\xca\xca\x24\x10\xf8\x81\x40\x3d\xc1\x4d\x28\xa2\x94\xef\x52\xa0\xd6\xda\x91\x65\x47\x70\xf0\xa4\x9e\x72\x4b\x79\x36\x72\xa6\x25\xb0\x45\x25\x69\xff\x7c\x14\xa5\x12\x6c\x2d\xcf\xc9\xb2\x01\x8e\x70\xf0\x6c\x3d\x58\x14\x82\x40\xe8\x
28\x8b\xe7\x05\x28\x50\x93\x09\xe6\x22\x5a\x85\xa0\x88\xbb\x40\xee\x87\x06\xa6\x75\x6b\x1c\xa7\x50\x28\xda\x53\xc7\xb6\x2d\x21\x2d\xcb\x06\x84\x08\x76\x14\xd2\xc1\x18\x5c\xf8\x9e\xb2\x70\x8a\x5b\x58\x2f\xcd\x03\x47\xb7\x85\xf7\x94\x57\xb6\x74\x6d\x6d\x2a\x51\xcd\x4c\x8a\x0e\x15\xb7\x06\x9a\x48\xdb\x76\x75\x2f\xde\xa9\x01\xe8\x1a\xe7\x2a\xf3\x81\xd5\x7f\x66\x03\xf5\x75\x42\x6d\x81\x2d\x9c\x72\x8c\xe9\xfb\xde\x74\xf0\xf3\x19\x5d\x9e\x48\x57\xca\x75\xe5\x87\x94\x1f\x8e\x5d\x6e\xef\x65\x03\xb7\xf7\x37\xb2\x81\xdf\x92\x23\x69\x2b\xbe\x4d\x4e\x4e\xcc\x9e\x2f\xed\x39\xf5\x66\x2b\x2c\xea\x53\xe6\x0f\x58\x95\x1d\x1e\xbe\x97\x81\xd3\xb8\x5e\x9e\xe1\xff\x22\xf7\x3e\x97\x18\x29\x9b\x5c\x02\x49\xad\xe9\x01\x17\xfe\x29\xa7\xb2\xc8\x16\xfe\xbc\xb8\xf8\xcb\x00\x00\x64\x92\x54\xb2\xa5\x16\x5b\xea\x20\xa2\x14\x75\x4d\xa1\xcc\x24\xa7\xa4\x95\xf2\xae\x25\xec\x49\x2f\x36\x70\x11\x58\xda\xef\x01\xd5\xfa\x96\xf3\x3f\xc0\x6d\x40\x9e\x5f\xb5\x43\xac\x5a\xbf\xea\xc8\xc9\xbd\x2a\x1e\xad\x14\xf5\xcb\x10\x37\x70\x61\x33\xa1\x52\x7d\x2d\xa7\x26\xed\x1a\x3f\x39\xa1\xb7\x1c\x1d\xc7\xfd\xb7\x5c\xd2\x7f\x3a\xa9\xcf\xf5\x56\xa5\x1c\xbf\xb3\xc7\x73\x3a\xeb\xd2\xca\xec\xcb\xdd\xf9\x75\xfe\x3c\xeb\xd0\x3a\xd8\x1d\x4d\x75\xa4\x4f\xcd\xf5\x55\x4e\x81\xa7\x05\xbe\xa0\x9f\x31\xe6\xdf\x00\x00\x00\xff\xff\xb6\xe2\xd1\xee\x91\x07\x00\x00") + +func testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYamlBytes() ([]byte, error) { + return bindataRead( + _testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-agent/rbac.yaml", + ) +} + +func testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYaml() (*asset, error) { + bytes, err := testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-agent/rbac.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x56\xc1\x6e\x1b\x37\x10\xbd\xef\x57\x0c\xb4\x97\x16\xd0\xee\xc6\x3e\x14\x85\x7a\x72\x5c\xa0\x0d\xd0\x16\x86\x53\x18\x28\x8a\x1e\x28\x72\x56\x9c\x8a\x4b\x2e\x38\xc3\x55\x95\xaf\x2f\xc8\x95\x1c\x25\xb1\x1b\x2b\x36\x0a\xe4\x64\x82\x1e\xce\xcc\x7b\x33\xef\xad\x6a\xf8\x31\x80\x0f\x02\x68\x48\x96\x60\xc2\xce\xbb\xa0\x0c\x1a\xe8\x63\x18\xc0\x8a\x8c\xbc\xea\xba\x0d\x89\x4d\xeb\x56\x87\xa1\xdb\xa6\x35\x46\x8f\x82\xdc\x68\xa6\x0e\xff\x11\x8c\x5e\xb9\xc6\xa2\x72\x62\x9b\x21\x78\x92\x10\xbb\xa8\x76\xdd\xf4\xaa\xbd\x6c\x5f\x75\x06\x47\x17\xf6\x27\x0f\x1f\x7b\xd4\xe8\xe0\x25\x06\xe7\x30\x76\x71\xad\x74\xbb\x57\x83\xab\x6a\xe8\x43\x04\xcd\xd4\x98\x48\x13\xc6\xc6\x06\x96\x66\x54\x62\x61\xba\x68\xbf\x6b\x2f\xab\x1a\xd6\x7b\x10\x64\xe9\xf0\x12\xbb\x7c\x20\xbf\x69\x06\xe5\xa9\x47\x16\xee\x58\x42\x54\x1b\x2c\xfd\xa6\xd1\x28\xc1\x92\x23\xa7\x68\xd9\x56\x75\x55\xc3\xef\x96\x18\xfe\xb8\xfa\xf5\x17\xe8\xc9\x21\xe4\x46\x14\x79\x06\xe5\x1c\xdc\xbe\xbe\xba\x86\xb0\xfe\x1b\xb5\x30\x88\x55\x02\x2a\x22\x78\xd4\xc8\xac\xe2\x1e\x24\x40\x4c\x1e\x8e\xa0\xaa\x1a\xae\xdf\xbe\x81\x19\x1a\x1c\xa0\xc1\x7b\x68\x6d\x29\xf8\xc6\xc3\x18\x83\x49\x5a\x28\xf8\x25\xa0\xd2\xb6\x3c\x9b\x31\xc2\xcc\xd9\x80\x5e\xc0\x2a\xce\x25\xd6\x08\x3a\xb1\x84\x81\xde\xa1\x59\x55\x35\x34\xf9\x56\x4d\x81\x4c\x4e\xde\x3b\xd2\xc2\x4b\x48\x8c\xe0\x83\x6f\x0c\xf6\x2a\x39\x01\xaf\x06\xe4\x51\x69\x04\xe5\x0d\x18\xea\x7b\x8c\x39\x6b\xb9\xaf\x6a\x80\xc2\x6e\x7e\x71\x1f\x69\x00\xbd\x90\x10\x32\x38\xda\x22\x88\x45\xb8\x76\x89\x05\xe3\x6d\x70\x58\x4a\x1b\xd4\x64\x10\x76\x16\xc5\x62\x2c\x21\x27\x2d\x47\x1c\x1d\x69\x25\xc8\xe5\x3f\x47\x66\x32\xc0\x52\xf2\x51\x6e\x96\x40\x
1e\x76\x96\xb4\x05\xad\x18\xc1\xa1\x32\x18\xd9\xd2\x08\xe8\xb0\x70\x05\x43\x62\xc9\x6c\xa0\x57\x6b\x87\xe6\x87\x92\x51\xf2\xfc\xc8\xf7\x2e\xa1\xd7\x87\xb2\x65\x6e\x8c\x92\xc6\x25\x30\x22\xac\xd1\x85\x5d\x55\xa9\x91\xee\x30\x32\x05\xbf\x82\xe9\xa2\xda\x92\x37\x2b\x78\x8b\x71\x22\x8d\x57\x5a\x87\xe4\xa5\x1a\x50\x94\x51\xa2\x56\x15\x14\xa6\x56\x65\xff\x3e\xbf\xb6\x15\x40\x5d\xd0\x67\xc2\x77\x24\xf6\x91\x59\xe4\xd3\x21\x75\xb9\x58\xc1\x21\xa6\xaa\x9a\xa6\xa9\x6a\xf8\xf9\x31\x86\xee\xe1\x67\xf0\x79\x03\x76\x21\x6e\xe7\x52\x37\x77\xbc\x84\x9b\xbb\x6b\x5e\xc2\x6f\xc1\x20\x97\x89\xdf\x04\xc3\x07\x8c\xa7\x33\x3c\x25\xa1\x48\x4d\x25\xb1\x21\xd2\x3b\x95\x39\x6e\xb7\xdf\x73\x4b\xa1\x9b\x2e\x1e\x60\xe2\xf3\x2c\x34\x31\x79\x8f\xb1\x8a\xc9\x21\xe7\x87\x0d\xa8\x91\x7e\x8a\x21\x8d\xbc\x82\x3f\x17\x8b\xbf\x2a\x00\x80\x88\x1c\x52\xd4\x58\xee\xc6\xdc\x0d\x0b\x7a\x99\x82\x4b\x03\xf2\x21\x68\xc2\xb8\x2e\x01\x1b\x94\xc5\x12\x16\x8e\xb8\xfc\xdd\x29\xd1\xb6\xc4\x7c\x41\x72\xed\x14\x0d\x2f\x5a\xc1\x67\xc2\x5f\xb4\xe5\x60\x5e\x34\x1f\x4e\xe8\xe5\x69\x19\x97\xb0\xd0\x11\x95\x60\x3e\x8d\x87\x22\x65\x2f\x3f\xd9\xa3\xd7\xe4\x0d\xf9\xcd\x73\xd6\xe9\x69\xc2\x6a\x62\xde\x5a\x4e\xb3\x0b\xcf\x3b\xf5\xa0\x72\x33\xbc\x33\x15\x7b\x9e\x66\x1f\x54\x6d\xee\xee\x16\xfb\xdc\xd7\xa7\x5a\x3b\x5f\x38\x70\x3f\xcf\xff\xa0\xf3\x39\x56\x91\x3f\x18\xb4\x19\xd4\xc8\x10\x62\x76\x59\xc6\x6c\x9f\xc5\x36\x75\x8a\xef\xbf\x10\x19\x66\x55\x03\xf5\xf0\x4d\x76\x93\xe0\xdd\x1e\xa8\xff\xf6\x41\x63\x26\x3e\x7a\xf2\x61\x51\xbe\xdc\x69\x9e\x69\xa2\x67\x30\xae\xfb\xcd\xd1\xa7\x3e\x52\x91\x0e\x21\x1a\xf2\xa7\x6d\x16\xf9\x7c\x20\xab\x99\xba\x72\xff\xb1\xa8\xee\xc5\x74\x54\x97\x41\x87\xb3\xa8\xe6\x1f\x21\x27\x42\xfb\x40\x5f\xff\xbf\xb0\x0a\x0b\xcf\x66\xfd\xeb\x51\xe7\xb9\xb2\x9c\xe9\x79\x8a\x26\xff\x0d\x00\x00\xff\xff\xde\x50\xa8\x24\x50\x0b\x00\x00") + +func testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYamlBytes() ([]byte, error) { + return bindataRead( + 
_testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml", + ) +} + +func testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYaml() (*asset, error) { + bytes, err := testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x5b\x6f\xdc\xb8\x0e\x7e\xf7\xaf\x20\xe2\x97\x16\x18\x7b\x90\x3e\x1c\x1c\xcc\x79\x4a\xd3\xa2\x27\x40\xba\x0d\x92\xb6\xc0\x62\x51\x6c\x35\x12\x3d\xe6\x46\x23\x79\x25\x6a\x66\xa7\xbf\x7e\x41\xd9\x93\x3a\x19\xe7\xd2\xcb\x02\xfb\x66\xe8\x42\xf2\xfb\x28\xf2\xa3\x4b\x78\xe5\xc1\x79\x06\x34\xc4\x33\x30\x7e\xeb\xac\x57\x06\x0d\x34\xc1\xaf\xa1\x65\xee\xe2\x62\x3e\x5f\x11\xb7\x69\x59\x6b\xbf\x9e\x5f\xa7\x25\x06\x87\x8c\xb1\xd2\x91\xe6\xf8\x17\x63\x70\xca\x56\x5d\xf0\x1b\x8a\xe4\x1d\x86\x79\x50\xdb\xf9\xe6\x45\x7d\x5c\x1f\xcf\x0d\x76\xd6\xef\x46\xb7\xe6\xf3\xb0\x54\xba\xde\xa9\xb5\x2d\x4a\x68\x7c\x00\x1d\xa9\x32\x81\x36\x18\xaa\xd6\x47\xae\x3a\xc5\x2d\x6c\x8e\xeb\xff\xd4\x2f\x8a\x12\x96\x3b\x60\x8c\x3c\xc7\x17\x38\x97\x0f\x72\xab\x6a\xad\x1c\x35\x18\x39\xce\x23\xfb\xa0\x56\x98\x43\x49\x9d\x51\x8c\xd9\x86\x98\xa8\x63\x5b\x94\x45\x09\xef\x5b\x8a\xf0\xeb\xc9\xdb\x73\x68\xc8\x22\x68\xef\x58\x91\x8b\xa0\xac\x85\xcb\x97\x27\xa7\xe0\x97\x7f\xa0\xe6\x08\xdc\x2a\x06\x15\x10\x1c\x6a\x8c\x51\x85\x1d\xb0\x87\x90\x1c\xec\x41\x16\x25\x9c\x5e\x9d\xc1\x08\x6a\x9d\x5d\x9c\x39\x59\x33\x49\x33\x79\x37\x03\x54\xba\xcd\x07\x7b\x54\
xd0\x73\xb0\x46\xc7\xd0\xaa\x28\x46\x97\x08\x3a\x45\xf6\x6b\xfa\x82\x66\x51\x94\x50\xc9\xaa\xda\x78\x32\x12\x60\x63\x49\x73\x9c\x41\x8a\x08\xce\xbb\xca\x60\xa3\x92\x65\x70\x6a\x8d\xb1\x53\x1a\x41\x39\x03\x86\x9a\x06\x83\x58\xcd\xeb\x45\x09\x90\xf9\x94\x1b\x37\x27\x0d\xa0\x63\x62\xc2\x08\x96\xae\x11\xb8\x45\x38\xb5\x29\x32\x86\x4b\x6f\x31\xbb\x36\xa8\xc9\x20\x6c\x5b\xe4\x16\x43\x3e\x32\x0a\x39\x60\x67\x49\x2b\xc6\x98\x77\xf6\x5c\x08\xc0\xec\x72\xc4\xc6\x0c\xc8\xc1\xb6\x25\xdd\x82\x56\x11\xc1\xa2\x32\x18\x62\x4b\x1d\xa0\xc5\xcc\x0e\xac\x53\x64\xc1\x8f\x4e\x2d\x2d\x9a\xff\x65\x1b\x2c\x39\x22\xd7\xd8\x84\x4e\x0f\x8e\x72\x6e\x22\x72\xea\x66\x10\x11\x61\x89\xd6\x6f\x8b\x42\x75\xf4\x11\x83\xf8\x5b\xc0\xe6\xb8\xb8\x26\x67\x16\x70\x85\x61\x43\x1a\x4f\xb4\xf6\xc9\x71\xb1\x46\x56\x46\xb1\x5a\x14\x90\xb9\x59\xe4\x37\x36\x0a\xb4\x00\x28\x33\x30\xe1\x72\x4b\xdc\xde\x43\xb3\x7c\x0d\x36\xf2\xc2\x02\x86\x33\x45\x51\x55\xd5\xe0\x7c\x4c\xe7\x38\xba\xfc\xce\x55\xe2\xd6\x07\xfa\xa2\x04\x7c\x7d\xfd\xdf\x58\x93\x9f\x6f\x8e\x27\x42\x9c\xaa\xa4\x2a\x24\x27\xe1\x86\x64\x31\x2e\x72\xd4\xef\x5b\x84\xc6\x5b\xeb\xb7\xe4\x56\x20\x1b\x10\x5b\x9f\xac\x11\x52\x93\xd3\x7e\x2d\x59\x93\x02\xf6\x01\x3a\x9b\x56\xf2\xd6\xf3\xd3\x0e\xf8\x67\xa2\x80\x10\x51\x07\xe4\x98\xad\xe5\x43\x7b\x7f\xe4\x56\x75\x5e\xad\x40\x75\xf4\x26\xf8\xd4\xc5\x05\xfc\x76\x74\xf4\x29\xaf\x02\x04\x8c\x3e\x05\x8d\x79\x75\x30\x73\xb3\xb9\xc1\xb0\xcc\x1b\x2b\xe4\xa3\x19\x1c\x59\x8a\x9c\x37\xa7\xac\xdd\xb1\xd5\x09\x67\x91\xd1\xf1\xc6\xdb\xb4\xc6\x38\x1c\x9a\xb4\x39\x83\xa3\xad\x62\xdd\xca\x87\x0e\xa8\x18\xe5\xcb\xa0\x45\xc6\xef\x75\xa8\xad\xa2\xf5\x93\xbd\xf6\x9d\x66\xca\xd7\xd0\x8f\x86\x44\x4f\x79\x1e\x4e\x68\xab\x62\x7c\x22\xce\x27\x62\xc2\x0d\x3a\x3e\xb0\xf8\x00\x65\x03\x8c\x19\x1c\x75\xf7\xf9\x89\x4e\x75\xb1\xf5\x5c\x3f\x0e\x6c\xc8\xdc\x70\xe1\x41\x64\x3f\xd3\x91\xf4\xf3\x29\xdc\x8f\xf9\x7b\xd4\x8d\x8e\xe4\xbc\xf9\xb9\x29\xfa\x36\x83\x25\x9c\x68\x51\x22\xd1\x86\x1e\xb4\x62\x56\xba\x95\x02\x8f\x40\x11\xbc\xb3\x3b\x70\x88\xa2\xd7\xdb\
x16\x5d\xdf\xdd\x6f\x84\x27\x9b\xc8\x8a\xd3\x22\x5c\x7c\x78\x79\x7e\x76\xf5\xff\xdf\x3f\xfc\xb2\xff\xfa\xf8\xee\xfc\xc3\xdb\xd7\x59\x10\x83\xb7\x16\x03\x68\xd5\xa9\x25\x59\xe2\x5d\xdf\x07\xce\x5c\xdf\x39\xa4\x99\xcf\x26\x1b\x14\x6c\xc9\x5a\xc8\x21\x1f\x86\x98\x6d\xb0\x07\x83\x8c\x61\x4d\x0e\xfb\x20\x89\x25\xf6\xa8\x1a\xec\x37\xa5\x6e\x41\x0d\xd7\xeb\xef\x49\xd5\x81\xe7\x27\x52\x3c\xdd\xc4\x5f\x92\x33\xe4\x56\x3f\xd2\xcb\xef\xc8\x4d\x15\x44\x1b\x62\xea\x07\x8d\x45\x86\x38\x29\x5c\x12\xf4\x7d\x82\xf5\x6d\x92\x35\x29\x5a\x12\xc6\x25\x36\x12\xc0\xa1\x74\x3d\x45\x87\xe0\x26\x33\x0f\x50\xd2\xd3\x5a\xc2\xc5\xe8\x99\xec\x55\x5f\x34\x5f\xb2\xbe\xf5\xe1\xba\x47\x81\xce\x74\x9e\xf2\x83\x76\xa0\x53\xf8\x3a\xd0\x48\xec\x45\x09\xd4\xc0\x33\x19\x77\xf2\x63\xa7\xe6\xf9\xe4\x54\x41\x71\x3f\x50\x0c\xf9\xfc\x7e\x35\xfe\xc1\xc1\xe0\x41\x1a\x75\xb3\xda\x6b\x79\x09\xef\x04\x90\x77\x08\xbe\xc9\x15\x7a\x5b\xd5\x63\xd6\xe6\xaf\xec\xf8\x20\xc0\x23\xe6\xc2\x1f\xa4\xdc\xc0\x52\x45\x14\x6a\x8a\x12\xb6\x52\xa9\x52\x58\xc8\xf9\xea\xe7\xaa\xea\x99\xaa\xf6\x2c\x55\xbc\xeb\xf0\x73\x0d\xaf\x6f\x8c\xca\xb4\x6b\xb0\x0b\x28\xf3\x9d\x91\x0c\x34\x6a\xe3\x83\x44\x74\x9e\x9d\xd5\xc5\x54\x57\xbb\x2d\x3b\x7b\x6b\x79\xeb\x6e\xc9\xdd\xc8\xce\xbe\xf6\x06\x9d\xbe\x25\x3f\x83\x24\x7d\xba\xeb\x4c\x7b\x1f\x0c\xb9\x71\xb6\x0e\xfd\xf7\xac\xfc\x04\xe7\x25\x5c\x48\x9b\x8a\x92\xab\x9e\xfd\xd3\xab\xb3\xab\xbe\xf9\x9c\xaa\x4e\x69\xe2\x5d\x66\x6c\xdc\x76\xf3\xab\x93\x9c\x49\x06\xbb\xb4\xb4\x14\x5b\x69\x1e\xa5\x70\x38\x74\xae\xdc\x57\xf3\x6d\x72\x8d\x0f\xeb\x1e\xcf\x5d\xb0\x13\x6d\xee\xae\x1e\xed\x67\x87\xde\x1c\xdd\x83\xfa\x1b\x14\x7f\x3c\x37\xf5\xa3\xe5\x9b\xd7\xef\xa1\x1b\xd1\x90\xa7\xee\xe1\xaf\x28\x03\x16\x5e\xb6\xca\x5e\x0b\xe6\xd4\x65\xd8\x7e\xeb\x86\x7a\xd4\xad\x22\x37\xfc\xd6\x1d\x92\x57\x8b\x87\x9e\xc2\x98\x9a\x86\x34\x49\xad\xcb\xd9\xd1\xef\xc6\x86\x54\x51\xc2\x15\x2b\xc6\x26\xd9\x2b\x64\x78\x76\xc3\x77\x56\xc2\x15\x32\x5c\x78\xf3\x3c\xff\x03\xbd\xfa\x7a\xf1\xd9\xf8\x84\x24\xd3\x9b\x7c\x84\x45\x73\x2e\xfb\x9f\x18\
x31\xc7\x1e\x1a\xea\xd7\x47\xd7\x9f\x3f\xe1\xa5\x77\xde\x4c\x30\x7e\xf0\x6a\x55\xd7\xc5\xc3\xcb\xc3\x6f\x54\xc4\xa9\x42\xb9\x25\x45\xff\xa0\x06\xe5\x06\xf4\xc3\x2d\xee\x5f\x28\x64\x8f\x2b\x58\x8f\xfc\x29\xf2\xf5\x77\x00\x00\x00\xff\xff\xb8\xf2\x67\xd2\x14\x11\x00\x00") func testE2eTestingManifestsStorageCsiExternalProvisionerRbacYamlBytes() ([]byte, error) { return bindataRead( @@ -2397,7 +2440,7 @@ func testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml() (*asset, err return a, nil } -var _testE2eTestingManifestsStorageCsiExternalResizerRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x55\x41\x8b\x23\x37\x13\xbd\xf7\xaf\x28\xec\xcb\xf7\x81\xed\x65\x6f\xa1\x73\x9a\xf5\x21\x2c\x24\xb0\xcc\x2c\x03\x21\xe4\x20\x4b\xaf\xdd\x95\x91\xa5\x8e\xaa\xd4\xce\xcc\xaf\x0f\xd2\xb4\x1d\xef\xd8\x3b\x99\xac\x09\xe4\xe4\x46\x2a\x55\xd5\x7b\xf5\x5e\x79\x4e\x9f\x7b\x16\xfa\xf9\xe6\xa7\x1f\xa9\x63\x0f\xb2\x31\xa8\xe1\x20\x64\xbc\xa7\xdb\x0f\x37\x6b\x8a\x9b\xdf\x60\x55\x48\x7b\xa3\x64\x12\x28\xc0\x42\xc4\xa4\x47\xd2\x48\x29\x07\xc2\x1f\x8a\x14\x8c\x6f\xe6\xb4\xbe\xfb\x48\x09\xc2\x4f\x48\xab\x66\xde\xcc\xe9\x63\xa0\x21\x45\x97\xad\x72\x0c\x0b\x82\xb1\x7d\x0d\x72\x89\x47\x24\x72\x18\x7c\x7c\xdc\x21\x28\xf5\x46\x4a\xc2\x0d\xc8\x66\xd1\xb8\xe3\x27\xb8\xb6\x99\xd3\xb2\x9c\x9a\x31\xb2\x2b\xcd\x75\x9e\xad\xca\x82\xb2\x80\x42\x0c\x4b\x87\xce\x64\xaf\x14\xcc\x0e\x32\x18\x0b\x32\xc1\x91\xe3\xae\x43\x2a\x59\xeb\x79\x33\x27\xa2\x2e\xa6\xfa\xe2\x18\xe9\x08\x41\x59\x19\x42\x9e\x1f\x40\xda\x83\xd6\x3e\x8b\x22\xdd\x46\x8f\x5a\xda\xc1\xb2\x03\xed\x7b\x68\x8f\x54\x43\x4e\x5a\x4e\x18\x3c\x5b\xa3\x90\x7a\x73\xe0\xa1\x00\xac\x25\x27\x26\x16\xc4\x81\xf6\x3d\xdb\x9e\xac\x11\x90\x87\x71\x48\xd2\xf3\x40\xf0\xa8\xcc\xd0\x2e\x8b\x16\xec\x08\x66\xe3\xe1\xbe\xaf\xef\xb5\xcc\x86\x43\xe7\x33\x82\x9d\x8a\xd4\x99\x08\x34\x0f\x0b\x12\x80\x36\xf0\x71\xdf\x34\x66\xe0\x7b\x24\xe1\x18\x5a\x1a\xdf\x37\x0f\x1c\x5c\x4b\x77\x48\x23\x5b\xdc\x58\x1b\x73\xd0\x66\x07\x35\xce\xa8\x69\x1b\xaa\xbc\xb4\x64\x85\x9
7\x53\x93\x0d\xd1\xbc\x02\x2a\x1c\xee\x59\xfb\xaf\xd0\x5b\xbe\xa6\xf7\xf5\xa0\xa5\x29\xa6\x69\x96\xcb\x65\x33\xa7\xdb\xe7\x7c\x47\x44\x05\x4f\x19\xe1\x3e\xa6\x87\xe7\xc4\x9f\xee\xd7\xb2\xa0\x4f\xf7\xb2\xa0\xbb\xb5\xac\xa6\x66\x4f\xa9\x3f\x45\x93\x36\xc6\xae\x4c\xd6\x3e\x26\x7e\x32\x85\xac\xd5\xc3\x77\xb2\xe2\xf8\x6e\x7c\x7f\x01\xd2\x61\x08\x07\x5c\xcb\x94\x43\x40\x6a\x52\xf6\x90\xb6\xa2\xfc\xdc\x83\xba\xe8\x7d\xdc\x73\xd8\x52\xb9\x20\xe9\x63\xf6\xae\xb4\x9b\x83\x8d\xbb\x32\x5d\xb8\x2a\x99\xc1\xe7\x6d\xf1\x43\x95\x7f\xc2\xef\x99\x13\x48\x60\x13\x54\x6a\xb6\x1a\x94\xe2\xc8\xa5\x5d\x0e\xdb\x55\x3d\x5d\x92\x19\xf8\x87\x14\xf3\x20\x2d\xfd\x32\x9b\xfd\x5a\x4f\xab\x24\x62\x4e\x16\xf5\x74\x4a\x73\xbc\x1c\x91\x36\xf5\x62\x0b\x9d\x2d\x68\xe6\x59\xea\xef\xde\xa8\xed\x6b\xd4\xa5\xb4\x2f\x92\x0e\x85\x38\x51\x04\x1d\xa3\xcf\x3b\xc8\x14\xf4\x7a\xf2\x05\xcd\xf2\xe0\x8c\xa2\x7c\x0d\xd7\xd4\xb3\xde\xf0\xee\x6d\x45\xaf\xaa\xf0\x4e\xd4\x68\x3e\x2b\xf4\x8d\x28\x30\x22\xe8\x59\xb2\x33\x8e\x6c\xc2\x94\xfd\xbc\x4e\xd5\xff\x99\x96\x3f\x70\x70\x1c\xb6\xd7\x48\xfa\xc4\xa5\xcb\x54\xec\x21\xf9\x79\x27\xb7\x15\xdb\x45\xaf\x17\x18\x97\x3c\xfe\xcf\x5c\x7e\xd1\xe7\xa5\x85\x5b\x74\xa5\xf8\xb9\x73\xff\xce\x86\x74\x1c\xc5\x2b\x34\xbc\x7d\x95\x20\x38\x1a\x22\x07\x2d\x2b\xd6\xe6\xf4\xd7\xce\x2f\x3d\x37\x73\xe2\x8e\xfe\x57\xfe\x11\x62\xf0\x8f\xc4\xdd\xff\x2f\x2e\x5f\x96\xc3\xde\x9d\xe6\xf7\xed\x4b\xe8\xca\x1d\xfa\x55\xfa\x6c\xb7\x3d\xac\xb0\x17\x6a\xb6\x31\x26\xc7\xe1\xb4\xa9\x2a\xe3\x2f\xe4\xed\x61\x64\xda\x04\x2f\x2d\x79\xd4\xf6\x41\xec\x0e\x1e\x2f\x35\x3e\xe9\xfe\x0b\x91\xff\x4b\xea\xae\x50\xaf\x26\xf2\x3f\x66\x91\xd7\xbd\xf1\x8c\xf8\x2d\xc6\xf8\x33\x00\x00\xff\xff\x47\xf8\xe7\x76\xb0\x09\x00\x00") +var _testE2eTestingManifestsStorageCsiExternalResizerRbacYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x55\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x0c\xa2\x4b\x0b\x58\x52\xb3\x87\xa2\x50\x4f\xd9\x14\x28\x16\x68\x81\x45\xb2\x08\x50\x14\x3d\xd0\xe4\xc8\x9c\x86\x22\x55\xce\x50\x6e\xf2\xeb\x0b\xd2\x8a\xeb\xb5\xbd\x69\xb6\x69\x81\x3d\x59\xe0\xc7\x9b\x79\x8f\xf3\x9e\x6b\xf8\x21\x80\x0f\x02\x68\x48\x56\x60\xc2\xd6\xbb\xa0\x0c\x1a\x18\x62\x18\xc1\x8a\x4c\xdc\x77\xdd\x86\xc4\xa6\x75\xab\xc3\xd8\xdd\xa7\x35\x46\x8f\x82\xdc\x68\xa6\x0e\xff\x14\x8c\x5e\xb9\x26\x22\xd3\x23\xc6\x2e\xaa\x6d\x37\x5f\xb6\x97\xed\x37\x9d\xc1\xc9\x85\x87\x83\x1b\x5d\x17\xd7\x4a\xb7\x0f\x6a\x74\x55\x0d\x43\x88\xa0\x99\x1a\x13\x69\xc6\xd8\xd8\xc0\xd2\x4c\x4a\x2c\xcc\x97\xed\xb7\xed\x9b\xaa\x86\xf5\x03\x08\xb2\x74\xf8\x06\xbb\xfc\x41\x7e\xd3\x8c\xca\xd3\x80\x2c\xdc\xb1\x84\xa8\x36\x58\xda\x48\x93\x51\x82\x05\x23\x43\xb4\x6c\xab\xba\xaa\xe1\x83\x25\x86\x5f\xae\x7e\xfe\x09\x06\x72\x08\x3a\x78\x51\xe4\x19\x94\x73\x70\xf3\xf6\xea\x1a\xc2\xfa\x77\xd4\xc2\x20\x56\x09\xa8\x88\xe0\x51\x23\xb3\x8a\x0f\x20\x01\x62\xf2\xf0\x44\xb0\xaa\xe1\xfa\xf6\x1d\x2c\x34\xdb\x02\xff\xce\xc3\x14\x83\x49\x5a\x28\xf8\x15\xa0\xd2\xb6\x1c\xda\x31\x82\x1d\xff\x11\xbd\x80\x55\x9c\x01\xd7\x08\x3a\xb1\x84\x91\x1e\xd1\xf4\x55\x0d\x4d\x5e\x55\x73\x20\x93\x9b\x1b\x1c\x69\xe1\x15\x24\x46\xf0\xc1\x37\x06\x07\x95\x9c\x80\x57\x23\xf2\xa4\x34\x82\xf2\x06\x0c\x0d\x03\xc6\x8c\x5a\xd6\xab\x1a\xa0\x68\x99\x6f\xec\x4f\x1a\x40\x2f\x24\x84\x0c\x8e\xee\x11\xc4\x22\x5c\xbb\xc4\x82\xf1\x26\x38\x2c\xa5\x0d\x6a\x32\x08\x5b\x8b\x62\x31\x96\x23\x07\x2d\x47\x9c\x1c\x69\x25\xc8\x65\xe7\x49\x87\x4c\xb0\x94\x5c\x94\x58\x01\x79\xd8\x5a\xd2\x16\xb4\x62\x04\x87\xca\x60\x64\x4b\x13\xa0\xc3\xa2\x0c\x8c\x89\x25\x73\x47\xaf\xd6\x0e\xcd\xf7\xe5\xbe\xe4\xb7\x21\x3f\xb8\x84\x5e\x2f\x45\xca\x9b\x30\x4a\x9a\x56\xc0\x88\xb0\x46\x17\xb6\x55\xa5\x26\xba\xc3\xc8\x14\x7c\x0f\xf3\x65\x75\x4f\xde\xf4\x70\x8b\x71\x26\x8d\x57\x5a\x87\xe4\xa5\x1a\x51\x94\x51\xa2\xfa\x0a\x8a\x2e\x7d\x99\xad\xa5\xc9\x0a\xa0\x2e\x84\xb2\x86\x5b\x12\xfb\x09\x79
\xf3\xd7\x72\xbf\x2c\xf4\xb0\x9c\xa9\xaa\xa6\x69\xaa\x1a\x6e\x76\x78\x7b\x46\x99\x4f\x7e\xc2\x6d\x88\xf7\x3b\xe0\xf7\x77\xd7\xbc\x82\xf7\x77\xbc\x82\xdb\x6b\x6e\x97\x66\x0f\xa5\x3f\x64\x53\xfc\xa0\x92\xd8\x10\xe9\x51\x65\xb1\xda\xfb\xef\xb8\xa5\xd0\xcd\x97\x67\x28\x1d\xbb\xad\x89\xc9\x7b\x8c\x55\x4c\x0e\xb9\x2f\x2c\x3f\x58\x84\x21\x38\x17\xb6\xe4\x37\x90\x37\x80\x6d\x48\xce\xe4\x76\x93\xd7\x61\xcc\xaf\x9b\x0d\x1e\x22\x4c\x2e\x6d\xb2\x1f\xca\xf8\x47\xfc\x23\x51\x44\x60\xd4\x11\x85\x0b\x5a\x39\x14\xc3\x4c\xb9\x5d\xf2\x9b\xb6\xac\x36\xa0\x26\xfa\x31\x86\x34\x71\x0f\xbf\x5e\x5c\xfc\x56\x56\xcb\x48\x84\x14\x35\x96\xd5\x05\x66\xbf\x39\x63\x5c\x97\x8d\x0d\xca\xc5\x0a\x2e\x1c\x71\xf9\xdd\x2a\xd1\xb6\x9c\x3a\x07\x7b\x04\x3a\x65\xe1\x58\xd0\xcb\x1c\x5c\x1a\x91\x97\x43\xcf\x83\xaf\xe0\x62\x7a\x4d\x15\xed\x14\x8d\x2f\x2b\xf5\xd2\x0a\xc1\xfc\xb7\x78\x67\x3b\xee\x58\x94\xa4\x93\x42\x9f\xa5\x05\xce\xe8\xe5\x04\xe2\x44\x5f\x1d\x51\x09\xe6\xaf\x5d\x14\x1f\x6a\x5e\xbc\x73\xe2\x83\xb7\xe4\x0d\xf9\xcd\x6b\xec\x70\xe0\xf0\x26\x66\x6b\x71\xda\xe5\x79\x5f\xb8\x9d\xcd\x89\x4c\xe3\x5c\x3e\x7c\x5e\x42\x9c\xcd\x88\xdc\xc2\x0d\x0e\xb9\xf8\xa9\xeb\xff\xc9\xc2\xb0\x7f\x8a\x67\x64\x78\x79\x0c\xa1\x37\x30\x05\xf2\x92\xe3\x59\xa7\xf8\xf7\xff\x45\xee\xb9\xaa\x81\x06\xf8\x2a\xff\x9b\x04\xef\x1e\x80\x86\xaf\xcf\x06\x37\xf1\x53\x66\x2f\xef\xf7\xef\x03\xec\x95\xf9\xfb\x49\xf9\xf4\xb0\x79\x8a\xbf\xa3\x69\xd6\x21\x44\x43\xfe\xb0\xa9\x32\xc6\x1f\x8d\xb7\x43\xc5\x4b\x8a\x1c\x1b\x71\x3f\xdb\x4f\xc3\x6e\xd0\xe1\xf1\x8c\x2f\x73\xff\xd1\x90\xff\x4f\xd3\x5d\xa8\xbe\x5a\xc8\x2f\xcc\x22\xcf\x7b\x63\xc7\xf8\x25\xc6\xf8\x2b\x00\x00\xff\xff\xee\x88\x1a\xe2\xc9\x0a\x00\x00") func testE2eTestingManifestsStorageCsiExternalResizerRbacYamlBytes() ([]byte, error) { return bindataRead( @@ -2417,27 +2460,27 @@ func testE2eTestingManifestsStorageCsiExternalResizerRbacYaml() (*asset, error) return a, nil } -var _testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x55\x4d\x8f\xdb\x36\x10\xbd\xf3\x57\x0c\x56\x57\xcb\x41\x6e\x85\x6e\x9b\x2d\x50\x04\x68\x11\x60\x13\x14\x28\x0a\x1f\xc6\xe4\xc8\x62\x97\x26\x85\xe1\x50\x5b\xe7\xd7\x17\xa4\x25\xc5\x1f\xea\xc2\x4d\x9d\x1b\x41\x0d\x67\xde\x7b\x33\x6f\x54\xc1\x97\xb0\x23\xe9\x88\xe1\xd5\x4a\x07\xd2\x11\x3c\x7f\x78\x7c\x82\xd6\x3a\x82\x36\x30\xd0\xdf\x42\xec\xd1\xd5\x3d\x87\xc1\x46\x1b\x3c\xf1\x0a\xa4\xb3\x11\xfe\x78\xfc\xed\xd7\x12\xa8\x2a\xd0\xc1\x0b\x5a\x1f\x01\x9d\x3b\x66\x08\xdb\xbf\x48\x4b\x04\xe9\x50\x00\x99\xc0\x93\xa6\x18\x91\x0f\x20\x01\x38\xf9\x39\x35\x3c\x7d\xfe\xa8\x2a\x88\x1e\xfb\xd8\x05\x11\xe2\xb5\xaa\x54\x05\x1f\x3d\xf4\x1c\x4c\xd2\x62\x83\x5f\x01\xa1\xee\x72\x28\x18\xb6\x03\x31\x18\xea\x5d\x38\xec\xc9\x0b\x74\x18\x73\xd2\x2d\x81\x4e\x51\xc2\xde\x7e\x25\xd3\xa8\x0a\xea\x7c\x8b\x43\xb0\x26\x03\x6c\x9d\xd5\x12\x57\x90\x22\x81\x0f\xbe\x36\xd4\x62\x72\x02\x1e\xf7\x14\x7b\xd4\x04\xe8\x0d\x18\xdb\xb6\xc4\x39\x6b\xb9\x57\x15\x40\x11\x22\xbf\x98\x23\x0d\x90\x17\x2b\x96\x22\x38\xfb\x42\x45\xb7\x27\x97\xa2\x10\x3f\x87\x22\x48\x0d\xa1\xcf\xb8\xd1\xb9\x03\x30\xe5\x97\x25\xea\x22\xcd\xc9\x23\xb0\x6d\x8e\x60\x2a\x25\xb3\x62\x33\xe6\x63\x73\x42\xe9\xd3\x37\xda\x51\x29\xec\xed\xef\xc4\xb9\x2b\x0d\x0c\xef\xd5\x8b\xf5\xa6\x81\xcf\xc4\x83\xd5\xf4\xa8\x75\x48\x5e\xd4\x9e\x04\x0d\x0a\x36\x0a\x0a\xa5\x06\x74\xb4\xf5\x89\xda\x4a\xd5\x75\x3d\xbe\x3d\x25\x71\x9a\x9c\xb7\xa8\xd7\x98\xa4\x0b\x6c\xbf\x62\x26\xb6\x7e\xf9\x29\xae\x6d\x78\x37\xbc\x3f\xab\x50\x4d\x64\x27\x36\xe7\x4c\x66\x0c\xf3\x5c\x9d\x00\xa9\x39\x79\x4f\xac\x38\x39\x8a\x39\x59\x0d\xd8\xdb\x5f\x38\xa4\x3e\x36\xf0\xe7\xc3\xc3\x46\x01\x00\x30\xc5\x90\x58\x53\xb9\xa3\x21\x2b\x31\x7e\x19\x88\xb7\xe5\xd6\xd9\x28\x0f\x2b\x78\x78\x45\xd1\x5d\x3e\x68\x26\x14\xca\xa7\xd4\x9b\xf1\xd4\x97\x8f\x9b\xdb\xea\x44\xd2\x4c\xd7\x85\x76\x54\xea\x94\x7a\x0b\x99\x26\x72\xeb\x28\x81\x71\x47\xa3\x68\x4b\x05\x86\xe0\xd2\x9e\xa6\x07\xda\x61\x8c\xf4\x66\xb9\x99\xde\x5d\xeb\x06\x2f\x4b\x82\x7e\xd3\x6f\x11\xc2
\x99\xae\x86\x1c\x09\xfd\x10\x58\xef\xa2\xa0\xa4\x2b\x74\x63\xed\xcd\xbf\x4c\xf2\x07\xeb\x8d\xf5\xbb\xef\x1b\xe8\x45\xcb\xd4\x9c\x0d\x12\xd3\x71\xc7\x1d\x47\x75\xd1\x7c\x19\xe6\xb2\xe9\xf2\x97\x6c\x96\xde\xe5\xcd\x53\x0c\xbe\xbc\x94\xf2\x69\xce\x53\xae\x1a\x18\xa3\x54\x86\xf1\x4c\x6d\x06\x70\xed\xdf\x9c\x5f\x77\xe8\x77\xe3\xe6\xc9\xae\x44\x17\x03\x14\x5b\x1e\xfd\x79\xb6\x80\x76\x24\x71\xb4\xaf\xb9\xc9\xa8\x30\xb7\xf7\x0d\x41\x4f\x9b\xf2\xfd\x7b\xe5\x9a\x3c\x54\xf0\xe5\xd3\xcf\x9f\x9a\x73\x0d\x27\xa6\x47\xed\x0e\x21\xc1\x2b\x7a\x29\x1b\xfc\x10\x12\x43\xb4\x86\x34\xf2\xdb\xf4\x1c\xa1\x21\x26\x47\xe5\xd7\x33\xed\xa3\x8b\x61\xd6\x21\xb0\xb1\xfe\x14\x74\x99\xcb\xb3\x31\x76\x84\x93\x8d\x2f\x4d\x3c\x3b\x67\xb2\xd2\xe8\x9b\x33\x2f\x8d\xbe\xdb\x5c\x8a\x78\x87\x91\xbe\x85\xf9\xfd\x75\xff\xdf\x9e\xb9\x37\xa0\x2b\x0b\x8d\xde\xf9\x2f\x22\xdd\xe4\x82\x7f\x02\x00\x00\xff\xff\x99\x98\x92\xb6\x6c\x09\x00\x00") +var _testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x55\xdd\x6e\xdc\x36\x13\xbd\xd7\x53\x0c\xac\xdb\x95\x14\x07\x1f\x3e\x14\x7b\xe7\x38\x6d\x11\xa0\x45\x00\x3b\x08\x50\x14\x06\x32\x4b\xce\xae\x58\x53\xa4\xc0\x19\xca\xdd\x3c\x7d\x41\xea\xc7\xfb\xd7\xc0\x4d\xdd\x3b\x81\x1c\xce\xcc\x39\x33\xe7\xa8\x84\xf7\x1e\x9c\x17\x20\x6d\x64\x05\xda\x3f\x39\xeb\x51\x93\x86\x6d\xf0\x1d\xb4\x22\x3d\xaf\x9b\x66\x67\xa4\x8d\x9b\x5a\xf9\xae\x79\x8c\x1b\x0a\x8e\x84\xb8\x52\x6c\x1a\xfa\x53\x28\x38\xb4\x15\x3b\xec\xb9\xf5\x22\x14\x9a\x80\x4f\xcd\xf0\xbf\xfa\x4d\xfd\xa6\xd1\xd4\x5b\xbf\x3f\x78\xd5\x28\x36\xc7\xc1\x1b\x54\xd5\xc9\x61\xbd\xc7\xce\x16\x25\x6c\x7d\x80\x74\xa5\x83\x19\x28\x54\xad\x67\xa9\x7a\x94\x16\x86\xeb\xfa\xff\xf5\xdb\xa2\x84\xcd\x1e\x84\x58\x1a\x7a\x4b\x4d\xfa\x30\x6e\x57\x75\xe8\xcc\x96\x58\xb8\x61\xf1\x01\x77\x94\x3b\x8d\xbd\x46\xa1\x9c\x23\xa5\xa8\xb9\x2d\xca\xa2\x84\x4f\x7e\x47\xd2\x52\x80\x27\x23\x2d\x48\x4b\x70\xf7\xee\xe6\x16\xb6\xc6\x52\x2e\xbf\x00\xec\x83\x1f\x0c\x1b\xef\x28\xac\x40\x5a\xc3\xf0\xdb\xcd\xaf\xbf\xe4\xc0\xa2\x04\xe5\x9d\xa0\x71\x0c\x68\xed\x98\xc1\x6f\xfe\x20\x25\x0c\xd2\xa2\x00\x06\x02\x47\x8a\x98\x31\xec\x41\x3c\x84\xe8\x96\xd4\x70\x7b\xff\xa1\x28\xe1\x10\x7f\x6e\xed\x83\x83\x3e\x78\x1d\x95\x18\xef\x56\x40\xa8\xda\x14\x0a\x23\x1b\x30\x52\xdb\x91\x13\x68\x91\x53\xd2\x0d\x81\x8a\x2c\xbe\x33\x5f\x49\xaf\x8b\x12\xaa\x74\x8a\x83\x37\x3a\x35\xb8\xb5\x46\x09\xaf\x20\x32\x81\xf3\xae\xd2\xb4\xc5\x68\x05\x1c\x76\xc4\x3d\x2a\x02\x74\x1a\xb4\xd9\x6e\x29\xa4\xac\xf9\xbc\x28\x01\x32\x11\xe9\xc5\x12\xa9\x81\x9c\x18\x31\xc4\x60\xcd\x23\x65\xde\x6e\x6d\x64\xa1\x70\xe7\x33\x21\x15\xf8\x3e\xf5\x8d\xd6\xee\x21\x50\x7a\x99\xa3\x4e\xd2\x1c\x3c\x02\xb3\x4d\x11\x81\x72\xc9\xc4\xd8\xd2\xf3\x38\x1c\x9f\xe7\xf4\x0c\x9b\x8b\x02\x7b\xf3\x99\x42\x9a\xca\x1a\x86\xeb\xe2\xd1\x38\xbd\x86\x7b\x0a\x83\x51\x74\xa3\x94\x8f\x4e\x8a\x8e\x04\x35\x0a\xae\x0b\xc8\x90\xd6\x70\xb2\x6d\x45\x51\x55\xd5\xf4\xf6\x10\xc4\x61\xf2\xb4\xa5\x35\x46\x69\x7d\x30\x5f\x31\x01\xab\x1f\x7f\xe0\xda\xf8\x66
\xb8\x3e\xaa\x50\xce\x60\x67\x34\xc7\x48\x96\x1e\x2e\x09\xa7\x0a\xd1\x39\x0a\x45\x88\x96\x38\x25\xab\x00\x7b\xf3\x73\xf0\xb1\xe7\x35\xfc\x7e\x75\xf5\x50\x00\x00\x04\x62\x1f\x83\xa2\x7c\x46\x43\x62\x62\xba\x19\x28\x6c\xf2\xa9\x35\x2c\x57\x2b\xb8\x7a\x42\x51\x6d\xfa\x50\x81\x50\x28\x7d\x8d\x3a\x48\x5f\x7d\xbe\x7c\xc8\x4d\xdf\x93\x0a\x24\xd0\x53\xe8\x0c\x27\xc8\x60\x78\x99\x60\x9d\x43\x7e\x74\xb8\x49\x53\x92\x04\x6d\xef\x63\x98\x17\xd1\x11\x69\x06\xce\x19\xc6\xd0\x9f\xb2\x70\xb0\xeb\x2d\xad\xe0\x8b\x62\x53\x4f\x4a\x9c\x49\x3b\x04\x3d\x3e\xcc\x4b\xf1\x25\x55\x65\x12\x30\x0e\x3e\x7b\x1b\x3b\xba\x9f\x02\x6f\x2d\x32\xd7\x53\xab\xb4\x18\xd3\xb1\x1b\xd5\x93\x4f\x19\xdf\x68\xaf\xb8\x19\x33\x73\x85\x4e\x57\x2a\x90\x4e\x3b\x8b\x96\xeb\x56\x3a\x9b\x77\xba\xf3\x81\x40\x93\xa0\xb1\x53\xf2\x8b\x94\x97\x67\xac\x4f\x99\x9f\x6f\x17\xe6\x77\x94\x89\xcf\x03\x78\x38\x1f\xe1\x0c\xfc\x84\x90\x4b\x93\x1d\x32\x01\xf3\x03\x95\x08\xa0\xb3\x41\x1f\x96\x5b\xe6\xfd\xaa\x75\xbd\x93\x4b\x1b\xf6\xbc\x50\x17\x5b\x38\x5a\x34\x4d\x96\x84\xfe\x93\xb6\x1a\x16\x94\x78\xd6\xdd\x54\xfb\xe1\x6f\xa4\xfd\xce\x38\x6d\xdc\xee\xfb\x14\x7e\xd1\x43\xaa\x90\x1c\x83\xe3\x68\xfa\xa3\x76\x2f\xba\x51\x6a\xf3\xb2\x0b\xa5\x9b\xe4\x1e\xbd\x4d\x56\x9c\x1d\xef\xb2\x4b\xa7\xaf\x25\x4f\x3e\x5a\xc3\x14\x55\xa4\x36\xee\x68\x9b\x1a\x38\x37\xb4\x94\x5f\xb5\xe8\x76\x93\x15\x27\x9b\x42\xcb\x1e\xb2\x4f\x8d\x86\x75\xe4\xc8\x3b\x12\x9e\xfc\x4c\xbf\xc8\xb9\x60\x19\xef\x37\x08\x3d\x1c\xca\xf7\x1b\xed\x39\x78\x28\xe1\xd3\xc7\xf7\x1f\xd7\xc7\x1c\xce\x48\x47\xee\xf6\x3e\xc2\x13\x3a\xc9\xf2\xcf\x26\xc6\x46\x93\xc2\xf0\x6d\x78\x96\x50\x53\x20\x4b\xf9\x5f\x3c\x1b\xf4\xc9\x32\x2b\xef\x83\x36\xee\xb0\xe9\xbc\x97\x47\x6b\x6c\x09\x67\x19\x9f\x8a\x78\x51\xce\x2c\xa5\x49\x37\x47\x5a\x9a\x74\xf7\x70\x4a\xe2\x2b\xac\xf4\x4b\x90\xbf\x3e\xef\xff\x5a\x33\xaf\xdd\xd0\x99\x84\x26\xed\xfc\x13\x92\x5e\xa4\x82\xbf\x02\x00\x00\xff\xff\x9c\x83\x40\x88\x7d\x0b\x00\x00") -func testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYamlBytes() ([]byte, error) { 
+func testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYamlBytes() ([]byte, error) { return bindataRead( - _testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYaml, - "test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml", + _testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYaml, + "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml", ) } -func testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYaml() (*asset, error) { - bytes, err := testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYamlBytes() +func testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYaml() (*asset, error) { + bytes, err := testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _testE2eTestingManifestsStorageCsiGcePdController_ssYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x55\x51\x6b\x23\x47\x0c\x7e\xf7\xaf\x10\xee\xf3\xac\xe3\x94\x96\xb2\xe0\x07\x93\x33\x6e\xb8\x9c\x63\x2e\xa1\x7d\x3c\x94\x59\x79\x3d\xf5\xec\xcc\x56\xd2\x6e\xe2\x96\xfe\xf7\x32\xd9\x75\xba\x4e\x1c\x68\x0e\x92\x01\x83\xd1\xe8\xfb\x24\x7d\xd2\xac\x76\x2e\x14\x39\xdc\x28\x2a\x6d\x1a\x7f\x43\x3a\xc2\xda\xfd\x46\x2c\x2e\x86\x1c\xb0\xae\x65\xd2\x4e\x47\x15\x29\x16\xa8\x98\x8f\x00\x02\x56\x94\x83\x15\x67\x4a\x4b\xa6\x2e\x8c\x8d\x41\x39\x7a\x4f\x3c\x92\x9a\x6c\xf2\x11\xe2\xd6\x59\x5a\x3d\xba\x8e\xff\xf3\x1d\x8f\x00\x98\x6a\xef\x2c\x4a\x0e\xd3\x47\x4f\x4f\x56\x23\x27\x14\x40\x85\x6a\xb7\x57\x78\x47\x5e\x3a\x03\xa4\x14\x72\x28\x6d\x6d\x6c\xac\xea\x46\xc9\xd4\x29\x39\x51\x0a\x6a\x0a\x27\x3b\x93\xd8\x0b\x76\x2d\xf1\x08\x40\xa9\xaa\x3d\x2a\xf5\x74\x83\xb4\xd3\xf1\x47\xcc\x6f\xe5\x06\x38\x94\x97\xce\x0f\xf0\x6b\x14\x85\x40\x7a\x1f\x79\x07\x55\x23\x0a\x77\x04\x8d\x50\x01\x9b\xc8\xe0\x82\x12\xa3\x55\x17\x03\xdc\x3b\xdd\xc2\xef\x91\x77\x3e\x62\x01\x97\x05\x05\x75\xba\x07\x17\x60\xf9\x79\xf1\xc4\x27\x2e\x58\x02\xa7\x8f\x0a\xa1\x25\x81\xe5\xc5\x02\xbe\xf4\x35\xc0\x0d\x71\x4b\xdc\x71\x2d\x3f\xbf\xb8\xc8\xe0\x2b\x55\xb1\xa5\x27\x3a\xdd\x3a\x01\xa6\x3f\x1b\xc7\x54\x51\x50\xb8\xdf\x52\x00\x27\xd2\x10\x3c\xde\x48\xf4\x2d\x15\x80\xa1\x80\x3b\xda\x44\x26\xc0\xb0\x07\x7a\xa8\xa3\x34\x4c\x10\x37\x4f\x54\x15\x29\x3b\x2b\x50\x47\x56\xe9\xad\xdb\x28\xba\xea\x8a\xcf\x41\xb9\x39\x04\xee\x5b\x3f\xb7\x36\x36\x41\x57\xaf\x0f\x8b\x11\xec\x21\xc9\x86\x2e\x10\x0f\x5a\x63\x06\x73\x26\x01\x6b\xd9\x46\xd5\xbe\x0d\xdd\x71\x15\x96\x94\xc3\xee\x17\xc9\x4a\xcb\x99\x8b\x13\x71\xa5\x11\x8d\x8c\x25\x4d\x9e\xe1\xf2\xf6\xc7\xec\x2c\x3b\x1f\xc0\x91\xcb\x41\xb8\x2e\xe4\xd8\x98\x76\xf6\xd3\xf8\xa5\x35\xb1\x61\x51\x30\x89\xcc\x12\x75\xfa\x65\x12\xed\x6e\xfc\x3c\xa1\x75\xe3\xfd\x3a\x7a\x67\xf7\x39\xcc\xfd\x3d\xee\x65\xe0\xd1\x46\xdf\x54\xf4\x25\x09\xf3\x22\x76\x57\x6e\xe2\xa4\x34\x7c\x7c\x74\x0d\x50\x25\xd0\x1a\x75\x9b\x43\x0a\x7e\x52\xa7\x9a\x63\xeb\xd2\xc3\x7d\xa3\x4e
\x03\x5c\xde\x4e\xb3\x9f\xb3\xb3\x77\xd5\xa9\xf7\xdd\x10\x6a\xc3\x64\x4a\x54\x92\xd9\x6d\xac\xa3\x8f\xe5\x7e\x96\x46\x69\xfc\x71\x9a\xa1\x2a\xda\xed\x1b\x05\x3b\x80\xf2\xf6\x3c\x3b\x7f\x67\xb5\xde\xb9\x7e\x26\x71\x7f\xbd\xb1\xfc\x1e\x93\x66\x65\xfa\x31\xb3\xb2\xc5\x50\x78\x32\x9d\x16\xc6\x85\x46\xc8\x10\x73\xe4\xd9\x06\xbd\x7c\xd4\xbc\xf4\x9f\xb0\xc1\x42\x38\x52\xac\x57\xab\xdc\x91\x61\xf2\x84\x42\x93\xff\xb7\x5b\x92\x90\x67\xd9\xd4\x94\x3b\xfa\x7e\x39\x29\x14\x75\x74\x41\x67\x4d\x70\x0f\xf9\xab\x82\x52\x68\x4f\xab\xb2\xbc\xbe\x5e\x5e\x2d\xbe\xcd\xd7\xeb\xab\xcb\x8b\xf9\xed\xe5\xf5\xea\xdb\xc5\xd7\xc5\xa7\xc5\xea\xf6\x72\x7e\x75\xf3\x4c\xa9\x16\x7d\x93\x36\xfb\x84\xd4\x4e\xac\x8f\x4d\x61\x04\x9f\xfe\x64\x7f\x48\x0c\xef\xdc\x93\x21\xf8\x10\xb7\x9f\x8f\x67\x0c\x4c\x58\x5c\x07\xbf\x3f\xda\x52\x27\xe8\x8f\x8b\x39\xa4\xdf\x51\x9e\x58\x4c\x27\x33\xa6\xaa\xd6\xfd\x27\xc7\x39\xfc\xfd\xcf\xcb\xe7\xf6\x6a\x9a\x42\x96\x49\x8f\x95\xe9\x6c\xab\x23\xe4\xe8\xdf\x00\x00\x00\xff\xff\x29\xdb\x32\x41\xae\x09\x00\x00") +var _testE2eTestingManifestsStorageCsiGcePdController_ssYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x4d\x6f\xe3\x36\x13\xbe\xfb\x57\x0c\xbc\xef\xe1\xed\x41\xfe\xca\x2e\x50\x08\xf0\xc1\x70\x5c\xd7\xd8\xac\x63\xc4\x41\x7b\x0c\x18\x72\x64\xb1\xa6\x48\x96\x1c\x29\xf1\x16\xfd\xef\x05\x2d\xd9\x91\x65\x1b\x4d\x0a\x6c\x92\x16\x4b\xc0\x80\xc1\x79\xe6\x8b\x33\xcf\x50\xd2\x5a\x6a\x11\xc3\x92\x18\x61\x92\xab\x25\x52\x8b\x59\xf9\x0b\x3a\x2f\x8d\x8e\x81\x59\xeb\xbb\x45\xbf\x95\x21\x31\xc1\x88\xc5\x2d\x00\xcd\x32\x8c\x81\x7b\x19\xad\x38\x46\x56\x44\xdc\x68\x72\x46\x29\x74\x2d\x6f\x91\x07\x8c\x47\x57\x48\x8e\xf3\x2d\xb4\xfd\x84\x6d\xb7\x00\x1c\x5a\x25\x39\xf3\x31\xf4\xb7\x48\x85\x9c\x8c\x0b\x5a\x00\x19\x23\x9e\x5e\xb1\x7b\x54\xbe\xdc\x80\x10\x42\x0c\x2b\x6e\x23\x6e\x32\x9b\x13\x46\x36\x04\xe7\x09\x35\x45\x42\xfa\x75\x14\xac\x0b\x27\x0b\x74\x2d\x00\xc2\xcc\x2a\x46\x58\x99\xab\x85\x1d\x96\x3a\xb0\xfc\x52\xdb\x00\xbb\xf4\xc2\xfa\x00\x3f\x1b\x4f\xa0\x91\x1e\x8c\x5b\x43\x96\x7b\x82\x7b\x84\xdc\xa3\x80\xc4\x38\x90\x9a\xd0\x31\x4e\xd2\x68\x78\x90\x94\xc2\xaf\xc6\xad\x95\x61\x02\x66\x02\x35\x49\xda\x80\xd4\x30\xfd\x3c\xd9\xdb\xf3\x52\x73\x04\x49\xdb\x13\x62\x1c\x3d\x4c\xc7\x13\xf8\x52\xe5\x00\x4b\x74\x05\xba\xd2\xd6\xf4\xf3\x91\xa0\x03\x37\x98\x99\x02\xf7\xe6\x28\x95\x1e\x1c\xfe\x9e\x4b\x87\x19\x6a\x82\x87\x14\x35\x48\xef\x73\x84\xad\xc4\x1b\x55\xa0\x00\xa6\x05\xdc\x63\x62\x1c\x02\xd3\x1b\xc0\x47\x6b\x7c\xee\x10\x4c\xb2\x37\x95\x21\x39\xc9\x3d\x58\xe3\xc8\x57\xbb\xa9\xf1\x34\x2f\x93\x8f\x81\x5c\xbe\x73\x5c\x95\x7e\xc4\xb9\xc9\x35\xcd\xcf\x37\x4b\xe4\x59\xa5\x12\xf6\x98\xd4\xe8\x6a\xa5\x89\x6a\x7d\xe6\x35\xb3\x3e\x35\x44\x55\x19\xca\x25\x33\xb6\xc2\x18\xd6\x3f\xfa\xce\x8a\xbb\x8e\x34\x5d\x2f\x57\x91\x27\xe3\xd8\x0a\xbb\x0d\xbd\xb8\xb8\xe8\xf4\x3a\x17\x35\x75\xe6\x56\x35\x77\xa5\xcb\x76\x14\x15\xc3\x4f\xed\xe3\xdd\x60\x8d\x09\xe1\xd0\xfb\x61\x30\x1d\x7e\x1d\x6f\xf8\xfa\x04\xb6\x3a\xac\x3d\x3e\x1e\x0c\x7a\xfd\x8f\x27\x80\x0a\x99\x40\x17\x6d\x09\x20\x8d\xfe\x7b\x44\x14\x8e\xc4\x5b\xc6\x71\xf8\xbf\xff\x2f\x2e\xc7\xcb\xd9\xdd\x7c\xf4\x65
\xb2\x5c\x8c\xc6\x93\x1f\x4e\xa8\x93\xcc\xd0\xe4\x34\xbc\xe8\xf5\x7c\x5d\x8c\xba\x68\x66\x5e\x1e\x76\xc3\xe6\x01\x06\xa0\x60\x2a\xc7\x9f\x9c\xc9\xe2\x86\x00\x20\x91\xa8\xc4\x0d\x26\xc7\x92\x4a\xb6\x60\x94\xc6\x7b\x3e\x76\xf6\x89\x34\xeb\xb9\xc8\x95\x5a\x18\x25\xf9\x26\x86\x91\x7a\x60\x1b\x5f\x43\x14\x46\xe5\x19\x7e\x09\x7d\x75\x54\xba\x32\x81\x50\x12\x0c\xdc\x75\x8d\x40\xb2\xa0\x54\x06\x11\x6a\x77\xb2\xcd\xac\x33\x85\x0c\x73\xef\x85\x6d\x56\xd3\x8b\x8b\x41\xa7\xdf\xe9\xbd\x46\x9b\x25\xc8\x28\x77\x18\xad\x18\xa1\x1f\xde\x1a\x6b\x94\x59\x6d\x86\x81\x89\x27\xd0\x29\x91\x8d\x50\x0b\x6b\xa4\xa6\xb2\x25\xfb\xdf\xae\xe1\x06\x9f\x0e\x1b\xae\x12\xe3\x23\x39\x16\x71\x87\x8c\x30\xda\xf5\x42\x1d\xf7\xa1\xc4\xb9\x5c\x1f\xcc\x89\x72\xa2\x0c\x13\xa6\x3c\xb6\x03\x4a\x48\xcf\xee\x15\x02\xa5\x08\x4f\xc0\xdd\xe8\x01\x93\x6c\x25\xe3\xe5\x0c\x6a\xc3\xfb\xc8\x85\x36\x02\x8f\x8c\xef\x50\x75\x17\x01\xf8\x2c\xe3\xcf\x26\xb6\xc0\x84\xe5\x8a\xa2\xc4\xd3\xc6\xe2\x10\x1f\xe9\xe3\xfb\x27\xe8\x76\xfa\x37\xe3\xda\xcf\xee\x85\x71\x14\xc3\xb6\xb1\x1a\x3e\xca\xd0\x0f\x3a\xb0\x81\xb0\xce\x90\xe1\x46\xc5\x70\x3b\x5e\xd4\x64\x4a\x16\xa8\xd1\xfb\x85\x33\xf7\x78\xe8\x39\x61\x52\xe5\x0e\x6f\x53\x87\x3e\x35\x4a\x94\x4f\x13\x4f\x2b\xb8\x9b\x22\x35\xf3\xb5\xe5\x08\x48\x91\x29\x4a\xbf\x76\x1b\x95\x6a\x82\xb7\x29\x9d\x0f\x5c\x6a\x49\x92\xa9\x4b\x54\x6c\xb3\x44\x6e\xb4\x08\x0f\x35\xbd\x03\x4c\x45\x89\x73\x62\x8b\x4e\x1a\xb1\x97\x0e\x7a\xaf\x37\xee\x18\x11\xe3\xe9\x0b\x67\xdd\x4e\x29\xdc\xa7\xaf\x34\xe8\x4e\x8c\xae\xc1\x5b\xde\xa6\xcd\xe1\xf6\xef\x26\xeb\xe0\x3b\x59\x9f\xd6\xfb\x25\xab\x43\x2f\xbf\xbe\x90\xab\x95\x4e\x5c\xf4\xdf\x90\xaa\x17\x6f\x41\xd5\x94\x69\xa1\x30\x2a\x6b\x12\x49\x9d\x7b\x8c\xd0\x39\xe3\xaa\x6b\xfe\x3f\xc3\xde\x8b\xef\xec\x7d\x5a\xef\x93\xbd\xd5\x7b\xef\xd1\xb3\xe2\x31\x7f\xb9\x32\xb9\x28\xdf\x26\xc2\x49\xad\xb8\xed\x3e\xef\xdb\x44\x60\xf8\xa0\x33\xf8\xa7\x0c\xdf\x13\x36\xd7\xf2\x31\x3e\xcb\xf1\xb3\x4c\x99\x5e\x5f\x4f\xaf\x26\x77\xa3\xc5\xe2\x6a\x36\x1e\xdd\xce\xae\xe7\x77\xe3\x9b\xc9\xe5\x64
\x7e\x3b\x1b\x5d\x2d\x4f\x11\x27\x86\x76\x17\x89\x57\x19\x7b\xb6\xff\xd3\xf9\xcd\x1f\xce\x83\x6f\x50\x98\xba\xf2\xce\x6f\x35\x2a\x1a\x16\x1c\x32\x71\xad\xd5\xe6\xe0\xfb\xc6\x09\xf3\x87\xc9\xec\xc2\x2f\x4d\x9e\xf8\xa4\x71\x32\x62\xcc\x2c\x6d\x2e\xa5\x8b\xe1\x8f\x3f\x8f\x6f\x80\xb3\x61\x7a\xe4\xae\x49\xbd\x72\x6f\x7e\xa0\xd9\xfa\x2b\x00\x00\xff\xff\x4e\x94\x1d\xc2\xe8\x13\x00\x00") func testE2eTestingManifestsStorageCsiGcePdController_ssYamlBytes() ([]byte, error) { return bindataRead( @@ -2457,7 +2500,7 @@ func testE2eTestingManifestsStorageCsiGcePdController_ssYaml() (*asset, error) { return a, nil } -var _testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\xc1\x8e\xe2\x38\x10\xbd\xe7\x2b\x4a\xf4\x65\x57\x22\x44\xbd\xa7\x15\xb7\x5d\x0e\x7b\x45\xcd\xaa\x2f\xa3\x39\x38\x4e\x75\xf0\xe0\xd8\x96\xab\x9c\x6e\xfa\xeb\x47\x0e\x01\x85\x81\x81\x34\x10\xba\xb9\x10\x45\x65\xbf\x7a\xe5\x7a\xcf\x95\x87\xf8\x83\x99\x35\xec\xad\xd6\xe8\x61\x81\xbe\x56\x12\xe1\x1f\x29\x6d\x30\x3c\x86\x27\xab\x91\x36\x7f\xb9\x32\x85\x32\x25\x25\xc2\xa9\x67\xf4\xa4\xac\x99\x42\xfd\x98\xac\x94\x29\xa6\xdb\x95\xed\xc2\xa4\x42\x16\x85\x60\x31\x4d\x00\x8c\xa8\x70\x0a\x92\x54\x5a\x4a\x4c\x5d\x91\xca\x1d\x60\x4a\x22\x49\xd2\x34\x4d\x1e\xe0\xcd\xe3\xcb\x14\x96\xcc\x8e\xa6\x59\x56\x2a\x5e\x86\x7c\x22\x6d\x95\xad\x42\x8e\xde\x20\x23\xa5\x92\x54\x86\x6f\x8c\xde\x08\x9d\x3a\x6f\x6b\x15\xb3\x40\x9f\xe5\xda\xe6\x59\x25\x88\xd1\x67\x05\x3a\x6d\xd7\x9d\x65\x99\xcf\x85\x9c\xac\x45\xa5\xdb\x5c\x67\x3a\xc4\xc8\x48\x6a\x8f\x4c\x13\x27\x02\x2f\xad\x57\xef\x82\x95\x35\x93\xd5\xdf\x34\x51\x36\xab\x1f\x4f\x33\xea\xe4\x92\xfa\xb8\xad\x0f\x1a\x29\x86\xa6\x20\x9c\xfa\xcf\xdb\xe0\x68\x0a\xdf\x46\xa3\xef\x09\x00\x80\x47\xb2\xc1\x4b\x6c\xde\xb9\x88\x4f\x8c\x86\x6b\xab\x43\x85\xd4\x06\xd5\xe8\xf3\x26\xa0\x44\x1e\x8d\x61\xa4\x15\x35\xff\xaf\x82\xe5\x32\x3e\x48\x8f\x82\x31\x3e\x15\xa8\x91\xb1\x59\x77\x01\xa0\xd4\x42\x55\xbd\x51\x83\x2b\xc4\x71\x2c\x62\xeb\x45\x89\x6d
\xd1\x8e\x21\xb7\x11\x52\x0b\xa2\x9e\x3c\x7b\x72\xc2\x1a\x0d\x1f\xec\x78\xa2\x64\x2d\x8d\x31\x8c\xdc\xef\x70\xce\xf3\x91\xa4\x8c\x2d\x6e\xcb\xe4\xaa\x0d\xc9\x08\x47\x4b\xcb\x93\xf3\xb9\xb7\xcd\xd6\x2e\x38\x89\x78\x4b\xa0\x28\xfe\x63\x47\xb5\x8f\xd7\x98\xc2\xa1\x5c\xff\xdd\x78\xd0\x8d\x54\xdb\xf1\xa1\xae\x80\x5b\xa3\x4b\x28\xe4\x3f\x50\x72\x2b\xe3\xa3\x36\x17\x19\x9c\xb1\xb7\x68\x07\x4f\xf8\x12\x37\x39\x74\x9f\x3e\x56\x02\xbb\xc2\x9f\xe0\x9a\x00\x5c\xea\xa3\x82\x59\xc8\xe5\x17\x30\xd1\x6d\x22\x77\x74\xd0\x1e\x26\x70\x7b\x89\x0e\xee\x2a\x7d\x25\xb9\x29\x78\x75\x4e\x8d\xa7\x0b\x76\x6f\xa5\xee\xba\xe4\x73\x65\xba\xdf\xac\xfd\x34\xba\x29\xd5\x03\x3c\x21\xa9\x77\xf4\x50\x05\x62\xc8\x11\x44\xae\x11\xd8\xc2\xab\xf5\x2b\x78\x55\xbc\x84\xf9\xf3\x8c\xc6\x30\x7f\xa6\x31\x2c\x66\x34\x19\x4c\x72\x7e\x93\xc9\xd7\x57\xdc\x15\x23\xcb\x55\x08\x19\xb1\xe0\x70\x00\x74\x21\x8b\x1b\x0c\x29\x4d\x0f\x0d\xac\xb6\x6d\x57\x7c\xae\xc2\xf6\x7a\xf3\x03\x02\xbb\xe0\x0a\xdc\xce\x26\x7c\xf6\x16\x8c\xf9\x75\xa3\x63\x2e\xe9\x2f\x2f\x87\xbd\x25\x3b\x40\x1f\x97\xed\x20\x53\xf2\xc5\x93\xe0\x75\x5f\x01\x83\x4e\xa0\xc7\x2d\xab\xfd\xcc\x1a\xa6\x1a\x6d\x56\xbd\x1d\xe7\x1e\x4e\xd0\x55\x70\xa7\xef\x3e\xd7\x18\x0e\x14\xf0\x21\x73\x70\x5e\xd5\x4a\x63\x51\x62\x01\x73\x5b\xc0\x02\x65\xf0\x8a\xd7\x30\xb7\x5a\xc9\xf5\x18\x9c\xc7\x5a\xd9\x40\x7a\x0d\x05\xbe\x28\x83\x05\xd4\x4a\xc0\x7c\xb3\xae\xc4\xe2\x7f\x24\x9e\x2f\xe6\x87\x45\xff\xe3\xcf\x41\xce\xc3\x91\x6b\x2c\xa6\x53\xb5\xc2\xab\x3a\xb2\xc7\x52\x11\x7b\xd1\xd6\xe1\x8e\xc7\x81\x7f\x61\xca\x48\x9c\xba\x5d\x59\x52\x47\xae\xe7\x51\xfc\x0c\x00\x00\xff\xff\x9f\xb0\xc3\xe5\x78\x12\x00\x00") +var _testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x0c\x9c\x4b\x0b\x98\x16\xb6\xa7\x85\x6f\x6d\x0e\xbd\x1a\x49\x91\x4b\xb1\x07\x8a\x9c\xd8\xac\x29\x91\xe0\x0c\x95\xf5\xfe\xfa\x82\xb2\xe4\xc8\x1f\x89\x15\xc7\x4a\xa2\x8b\x04\x61\x86\x33\xf3\xf4\xde\x23\x75\x93\x2e\xb8\x75\x15\x07\x67\x2d\x06\xb8\xc7\x50\x1b\x85\xf0\xa7\x52\x2e\x56\x3c\x85\x3b\x67\x91\xb6\xb7\xc2\x54\xda\x54\x4b\xca\xa4\x37\x0f\x18\xc8\xb8\x6a\x0e\xf5\xb7\x6c\x6d\x2a\x3d\xef\x32\xdb\xc4\xac\x44\x96\x5a\xb2\x9c\x67\x00\x95\x2c\x71\x0e\x8a\x8c\x58\x2a\x14\x5e\x0b\xb5\x2b\x28\x48\x66\x99\x10\x22\xbb\x81\x9f\x01\x1f\xe7\xb0\x62\xf6\x34\xcf\xf3\xa5\xe1\x55\x2c\x66\xca\x95\xf9\x3a\x16\x18\x2a\x64\x24\xa1\xc8\xe4\xf8\x93\x31\x54\xd2\x0a\x1f\x5c\x6d\x52\x17\x18\xf2\xc2\xba\x22\x2f\x25\x31\x86\x5c\xa3\xb7\x6e\xd3\x4b\xcb\x43\x21\xd5\x6c\x23\x4b\xdb\xf6\x7a\x6b\x63\x8a\x4c\x43\xed\x0d\xd3\xc4\xc9\xc8\x2b\x17\xcc\x2f\xc9\xc6\x55\xb3\xf5\x77\x9a\x19\x97\xd7\xdf\x5e\x9f\xa8\xd7\x8b\x08\x69\xd9\x10\x2d\x52\x0a\x15\x20\xbd\xf9\x3b\xb8\xe8\x69\x0e\xff\x4e\x26\x3f\x32\x00\x80\x80\xe4\x62\x50\xd8\xbc\xf3\xa9\x3e\x31\x56\x5c\x3b\x1b\x4b\xa4\x36\xa8\xc6\x50\x34\x01\x4b\xe4\xc9\x14\x26\xd6\x50\x73\x7f\x92\xac\x56\xe9\x41\x05\x94\x8c\xe9\x49\xa3\x45\xc6\x26\xef\x82\x82\xca\x4a\x53\x0e\xae\x1a\xbd\x96\xa7\x6b\x11\xbb\x20\x97\xd8\x82\x76\xaa\x72\x1b\xa1\xac\x24\x1a\x38\xe7\xc0\x99\xb0\xc6\x8a\x8f\x56\x7c\x05\xb2\x76\x8c\x29\x4c\xfc\x4b\x75\xce\xcf\xa3\xc8\x54\x4e\x5f\x77\x92\x77\x2d\x48\x95\xf4\xb4\x72\x3c\x3b\xdf\x7b\x4b\xb6\x36\xe1\xd5\x8a\xd7\x2c\x94\xc4\x7f\xea\x53\xed\xd7\x6b\x4c\xe1\x58\xae\x7f\x6d\x3d\xe8\x4a\xaa\xed\xf9\x50\x5f\xc0\xad\xd1\x65\x14\x8b\xff\x50\x71\x2b\xe3\x93\x36\x97\x26\x38\x63\x6f\xc9\x0e\xee\xf0\x31\x2d\x72\xec\x3e\x43\xac\x04\x76\xc0\xbf\x32\x6b\x06\x70\xa9\x8f\x4a\x66\xa9\x56\x5f\xc0\x44\xbb\x46\x3e\xd0\x41\x07\x98\xc0\xf5\x25\x3a\xba\xab\x0c\x95\xe4\x16\xf0\xf2\x9c\x1a\xdf\x08\xd8\x05\xd5\x73\x62\xc9\xf1\xa8\x89\xae\xc4\x47\x9b\xc1\x8e\x88\x9f\xeb\x04\xfb\x7a
\x18\x66\x03\x5b\xa8\x6e\xe0\x0e\xc9\xfc\xc2\x00\x65\x24\x86\x02\x41\x16\x16\x81\x1d\x3c\xb9\xb0\x86\x27\xc3\x2b\x58\x3c\xdc\xd2\x14\x16\x0f\x34\x85\xfb\x5b\x9a\x8d\xa6\xea\xb0\xed\xe4\xeb\x8b\xfa\x1d\xa7\xa2\x77\x55\x78\x81\xfc\x17\x4e\x71\x85\x73\x50\xc3\xa1\x91\xd5\xd6\xb1\xe2\x73\x15\xb6\xc7\xcd\x37\x08\xec\x82\x5d\xb6\x3b\xfe\xf0\xd9\x8d\x36\xf5\xd7\x8f\x4e\xbd\x88\x83\x97\xe3\x6e\xc4\xbd\x42\x6f\x97\xed\x28\x07\xf1\x8b\x0f\x9b\xef\xfb\xd1\x18\xf5\x90\x7b\xda\xb2\xda\x3f\xb9\x71\xd0\x68\xbb\x1a\xec\x38\x1f\xbc\xef\xf6\x79\xf7\xb9\xc6\x70\xa4\x80\x61\xe6\xd0\xc3\xeb\x8a\x72\xb4\x28\x35\x06\xb4\xa8\x52\x56\xd7\x4f\x0a\x23\x2f\x15\xce\xa1\x1b\x99\x8c\xd0\xc1\xd4\x18\x32\x00\x2b\x0b\xb4\x0d\x6a\x00\xeb\xef\x24\xa4\xf7\x29\xd0\x0b\xe5\x4a\x1f\x19\xc5\xf3\x66\x24\xb4\xa1\x75\x3f\xbb\x95\xfb\x01\xed\x94\x73\x41\x9b\xaa\xdf\x7a\xc3\xa0\x3d\xc2\x59\x94\x9d\xde\x0e\x49\xbf\x23\x7b\xc7\xfe\x67\xaa\x3f\x33\xaf\xb5\x85\x1f\x87\x50\x8e\xc6\xb9\x03\x70\x3b\xda\x8d\x8a\xef\x33\xa7\x5f\x64\xf4\x5b\xf9\xfc\x12\x91\x4f\x73\x67\x30\x97\x6f\xc0\x07\x53\x1b\x8b\x7a\x89\x1a\x16\x4e\xc3\x3d\xaa\x18\x0c\x6f\x60\xe1\xac\x51\x9b\x29\xf8\x80\xb5\x71\x91\xec\x06\x34\x3e\x9a\x0a\x35\xd4\x46\xc2\x62\x9b\xb7\x44\xfd\x0f\x12\x2f\xee\x17\xc7\x06\xf2\xdb\xef\xa3\x78\x8b\x27\xdf\x80\xdd\x43\x6c\x8b\xbb\x08\xb8\x34\xc4\x41\xb6\x9a\xfe\x40\x6b\xc1\x3f\x50\x30\x12\x0b\xbf\x83\x45\x78\xf2\x03\x3f\xc5\xff\x01\x00\x00\xff\xff\xae\xfe\xa8\xd2\xa7\x15\x00\x00") func testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYamlBytes() ([]byte, error) { return bindataRead( @@ -2477,7 +2520,7 @@ func testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYaml() (*asset, erro return a, nil } -var _testE2eTestingManifestsStorageCsiGcePdNode_dsYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\x5d\x8f\xdb\x36\x10\x7c\xf7\xaf\x58\x38\xaf\x95\x94\xa0\x28\x5a\x08\xb8\x87\x26\x71\xd3\x22\xcd\xa5\xc8\xa5\xed\x43\x51\x04\x34\x39\x96\x08\x53\x24\x4b\xae\x74\xf6\xbf\x2f\x68\xc9\x3e\x7f\xc8\xba\xbb\x20\x04\xee\x60\xf0\x63\x76\x96\x3b\xb3\xa6\xd7\xda\xaa\x92\xde\x0a\x34\xce\xde\x81\x67\xc2\xeb\xbf\x10\xa2\x76\xb6\x24\xe1\x7d\x2c\xba\x57\xb3\x06\x2c\x94\x60\x51\xce\x88\xac\x68\x50\x92\x8c\x3a\xab\x24\x32\xaf\x32\xeb\x14\x66\xd1\x43\xa6\xd5\x08\x03\xc9\x2e\xa4\xcf\x44\x8d\x60\x59\xff\x2e\x96\x30\xb1\x9f\xa0\x04\x59\x52\x25\x7d\x26\x5d\xe3\x5b\x46\xe6\x53\xb0\xc8\xb0\x9c\x29\x1d\xd7\x59\x42\x56\x41\x77\x08\x33\x22\x46\xe3\x8d\x60\x0c\x70\x47\x34\xd2\x30\x27\xc8\xcf\xc5\x26\xda\x93\x4e\xe3\x05\xfd\xea\x22\x93\x05\xdf\xbb\xb0\xa6\xa6\x8d\x4c\x4b\x50\x1b\xa1\x68\xe5\x02\x69\xcb\x08\x42\xb2\x76\x96\xee\x35\xd7\xf4\xb7\x0b\x6b\xe3\x84\xa2\xdf\x14\x2c\x6b\xde\x92\xb6\xf4\xee\xfd\xe2\x80\x17\xb5\x95\x20\xcd\x14\xe0\x8d\x90\x88\xf4\xee\xcd\x82\x3e\x0c\x39\xd0\x1d\x42\x87\xd0\x63\xbd\x7b\x7f\xb1\x90\xd3\x27\x34\xae\xc3\x01\x8e\x6b\x1d\x29\xe0\xbf\x56\x07\x34\xb0\x4c\xf7\x35\x2c\xe9\x18\x5b\xd0\x6e\x25\x3a\xd3\x41\x91\xb0\x8a\x96\x58\xb9\x00\x12\x76\x4b\xd8\x78\x17\xdb\x00\x72\xab\x03\x54\x03\x0e\x5a\x46\xf2\x2e\x70\xcc\x87\xe9\xda\x45\xbe\xed\xb3\x2f\x89\x43\xbb\x8f\x2c\x9d\x65\xa1\x2d\xc2\xd1\x45\x67\x47\x2a\xe8\xef\x33\x0b\xa8\x74\xe4\x20\xc2\x61\x13\x91\x6e\x44\x85\x92\xd6\x3f\xc5\xbc\x92\x21\xd7\xae\x88\xba\xca\x22\xbb\x20\x2a\x14\xe9\x70\xd2\xce\x05\x42\xd9\xbd\xca\xbf\xcf\x5f\x1e\x01\x89\x50\x1d\x45\xef\x19\xcc\xb3\xac\xbb\xf9\x61\x7e\x39\x9b\x70\x85\x52\x01\x31\xde\xa4\x20\xe9\x2f\x8f\x4e\xae\x47\xf6\xae\xdb\x25\x0c\xf8\x10\x3b\x95\x37\xf3\x82\xeb\x9b\xa2\x13\xa1\x30\x7a\x59\x0c\x5b\x0a\x6f\xda\x4a\xdb\x58\x78\x95\xef\x00\xfb\x2c\xf2\x6a\x8d\x94\xd8\x58\x0c\xa3\x57\x90\x5b\x69\x70\x4a\xdd\x07\xdc\xb1\xf3\xa7\x93\x44\xd8\x3c\x88\xf1\x61\x48\xd7\x34\x22\x19\xf4\x9f\x79\xb1\xd4\xb6\x88\xf5\xfc\x3b\x9a\x67\x32\xfd\x0f
\x0d\x65\x61\x45\xc5\x31\xf9\x71\x7a\x4f\xd9\x93\xee\xa0\x4f\xe1\xdf\x23\x16\xb0\xdd\xf9\xc5\xf7\xa5\x7f\xff\xe7\xeb\xc5\x97\xdb\x8f\x6f\x17\x5f\x6e\x7f\xfe\xb0\x38\xe3\xdd\x09\xd3\xe2\x97\xe0\x9a\xcb\x84\x56\x1a\x46\x7d\xc2\xea\x72\x65\x58\xfb\x43\x70\x5d\xee\xac\x99\x27\x75\xdc\x8a\x06\x47\x5b\x3b\x67\xda\x06\x1f\x5c\x6b\xf9\x42\x11\x3d\xb1\xbe\x4e\x99\xd2\xe1\x2c\x42\x93\x0e\xf5\xe8\xa9\x5c\xa3\x87\x4f\x74\x30\x0d\x71\xbc\xf5\xc2\x18\x43\x6b\x3c\xea\x35\xfd\x88\x90\x6d\xd0\xbc\x7d\xe3\x2c\x63\xc3\xe7\xc2\xd0\x9d\x36\xa8\xa0\x4e\x0c\x48\x0f\x4e\x1a\x5c\x54\xad\x91\x05\x18\x88\x88\xe2\x69\xed\xae\xec\x5e\xe6\x3f\xe6\x2f\xb3\x54\xea\xaf\x36\x16\xac\xf2\x4e\x5b\xbe\x69\xad\xde\x94\x57\xad\xf5\x78\x89\xf6\xae\x9b\xbe\xe0\x33\x03\x8e\xee\x0c\xce\x8b\x6a\x57\x82\x92\xe6\xaf\xb5\xd2\x01\xbb\x16\x2d\xcc\x39\xff\x6f\x20\x0e\x85\x4e\x4b\x3c\x72\x58\xa1\x3b\x59\x7d\x41\x9f\x6b\xd0\xca\x19\xe3\xee\xb5\xad\xfa\xbd\x91\x44\xc0\xbe\x99\x2b\x62\x47\x1c\x74\x55\x21\xec\xba\x30\xb5\x0a\x9d\x50\x0d\xad\x82\x6b\xce\xc0\x0e\xdd\x78\x94\x61\x3a\x98\x85\xd6\x20\x66\x60\x39\xc1\x12\x2c\x8b\xf6\x9c\xea\x08\x8a\xd1\xcb\x09\x94\x54\x9d\x69\x94\xa4\x8d\x2b\xa5\x1b\x5c\xd4\xda\xeb\x10\x71\x1b\x27\x8e\x3e\xac\xf6\x8a\x1b\xf9\x7e\x9a\xb0\x73\xba\xe8\x1d\xd0\xa9\x05\x47\xa5\xb7\xef\xfd\x5f\x06\xbc\x6d\x71\x72\x88\xb7\x1e\x25\xbd\xdd\x69\xcf\x85\xed\x05\x8b\x71\xb9\x3f\x87\xc0\xb3\xc2\x8d\x6a\xfc\x6b\xd2\xbd\xf2\x55\x37\x45\xe6\x63\x78\x13\x20\x18\x17\xa4\x46\xbd\x33\x49\xea\x5c\x14\xd7\xd2\xfe\x66\x06\x1b\x33\xd7\xa3\xc6\x9a\x4c\x61\xd4\x65\x8f\x95\xef\xaa\xfb\x26\x43\x8d\x5a\xf1\x49\xa1\x2e\x2c\x3a\x19\x67\xd4\xaf\x8f\xc5\x39\xf5\xf1\x24\xfe\xb9\xe5\xc7\xa1\x5f\x50\xcd\xec\x63\x59\xec\x24\x1b\x2c\x18\x31\x49\x53\x39\x19\x0b\xe9\xac\x84\xe7\xdd\x87\x95\xae\xda\xe1\xd1\x93\x2a\xcb\x99\xb0\x2a\x63\x67\x30\x4c\x1e\xf0\xee\x00\x9a\xa7\x47\x87\x16\x86\xa4\x88\x98\xe7\xf4\x39\xbd\xb8\xef\xb5\x31\x34\x9c\x00\xa1\x43\xd8\x72\xad\x6d\x95\xd3\xad\x53\x48\xef\x33\xef\x6c\x7a\x8e\xc7\xda\xb5\x46\x1d\x00\x97
\xa0\x28\x6b\xa8\xd6\x40\x91\xb3\x24\x8c\xa1\xf4\x9e\x39\xbc\xb7\x1f\x58\x1c\x1a\x57\x46\xce\xa7\x39\x17\x4a\x5a\x6c\x74\xe4\xfd\x5d\xa4\x83\x77\x27\xbf\xac\xd2\x38\xcd\xdd\xc5\x92\x8c\xb6\xed\x66\xf6\x7f\x00\x00\x00\xff\xff\xd4\xd5\xd3\xa2\xd2\x0d\x00\x00") +var _testE2eTestingManifestsStorageCsiGcePdNode_dsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\x41\x6f\xe3\x36\x13\xbd\xfb\x57\x0c\xbc\xd7\x8f\xd2\xda\x1f\x0a\x14\x02\x72\xe8\x6e\xd2\x1e\xda\xcd\x06\x4d\xda\x4b\x51\x2c\x68\x72\x2c\x11\xa6\x38\xec\x90\xd2\xc6\xfd\xf5\x05\x2d\xc7\x91\x64\x59\x49\x16\x4b\x20\x81\xc3\x19\xbe\x99\xe1\xbc\x37\xa1\x77\xc6\xe9\x02\xae\x25\xd6\xe4\xee\x31\x2e\xa4\x37\x7f\x22\x07\x43\xae\x00\xe9\x7d\xc8\xdb\xd5\xa2\xc6\x28\xb5\x8c\xb2\x58\x00\x38\x59\x63\x01\x2a\x18\x51\x2a\x14\x5e\x0b\x47\x1a\x17\xc1\xa3\x4a\xd6\x80\x16\x55\x24\x4e\x9f\x01\x6a\x19\x55\xf5\x9b\xdc\xa0\x0d\xdd\x06\x24\xc8\x02\x4a\xe5\x85\xa2\xda\x37\x11\x85\x4f\xc1\x42\x44\x17\x85\x36\x61\x27\x12\xb2\x66\xd3\x22\x2f\x00\x22\xd6\xde\xca\x88\x47\xb8\x5e\x1a\x69\xd9\x01\xf2\x5b\xb1\x01\x9e\x92\x4e\x4b\x91\x8b\xd2\x38\xe4\x1e\x9e\xe8\x15\xdb\x1d\x13\x8c\xa5\x09\x91\x25\x9f\x9c\x00\x4c\x2d\x4b\x2c\x60\xf7\x63\xc8\x4a\xc5\x99\xa1\x3c\x98\x52\x84\x48\x2c\x4b\xcc\xd3\xe1\x74\x45\x67\x08\x45\xbb\xce\x56\xd9\xfb\x1e\x90\xe4\xb2\x17\xbd\xcb\x60\x29\x44\x7b\xf5\xc3\xf2\x7c\x37\xe1\x4a\xad\x19\x43\xb8\x4a\x41\xd2\x4f\x16\x48\xed\x26\x7c\x77\xcd\x06\x2d\xc6\x53\xec\x68\xc8\x09\x2f\x63\x75\x95\xb7\x92\x73\x6b\x36\xf9\xd1\x25\xf7\xb6\x29\x8d\x0b\xb9\xd7\xd9\x01\xb0\xab\x22\x2b\x77\x98\x0a\x9b\x89\x51\xc5\xe8\x05\x3a\xed\xc9\xb8\x78\x55\xac\xd7\xef\x57\xff\xef\xbb\x59\xb3\x45\xb5\x57\x16\x87\x15\x7a\xc6\xfb\x48\x7e\xb8\x09\x80\x8f\xcf\xad\x79\x5e\x8a\xea\x5a\x26\xba\xfe\xb5\xcc\x37\xc6\xe5\xa1\x5a\xfe\x0f\x96\x42\xa5\xdf\x5c\x83\xe0\x2d\xe4\xfd\x1a\xa7\xab\x78\x8d\x4f\xba\xaa\xae\xd2\xbf\x7b\x59\xa0\x6b\xc7\xfd\xe9\x18\xf2\xeb\x1f\x1f\x6e\xbe\xdc\x7e\xbe\xbe\xf9\x72\xfb\xd3\xa7\x9b\x51\xde\xad\xb4\x0d\xfe\xcc\x54\x9f\x1
7\xb4\x35\x68\xf5\xef\xb8\x3d\xb7\x1c\x6d\x77\x32\x56\xc5\x81\xa8\x59\x22\xd1\xad\xac\xb1\xe7\xea\x89\xe3\x19\x63\x4e\x4c\xbe\x23\x8e\x05\x1c\x1a\x31\x82\xef\x92\x1e\x74\x6c\xe4\xe1\x99\x22\x29\xb2\x05\x3c\x7c\xbc\x1b\x74\xb1\x45\x87\x21\xdc\x31\x6d\x46\x9d\xdc\x4a\x63\x1b\xc6\x87\x8a\x31\x54\x64\x75\x01\xab\x81\x3d\x85\xfb\x05\xe3\xb8\x54\x7f\xa8\x30\xaf\x50\xda\x58\xfd\x3b\x36\x1e\x4a\xb8\x9c\xa8\x71\x26\x1a\x69\xaf\xd1\xca\xfd\x3d\x2a\x72\x3a\x14\xb0\x7a\x3f\xf0\x89\xa6\x46\x6a\xe2\x25\xb3\x47\x36\xa4\x4f\xd6\x75\xdf\xda\x92\x6d\x6a\xfc\x44\x8d\x3b\xbf\xe6\xee\x0e\x3b\xb9\x08\x6d\x78\x94\x79\x9d\x0e\x75\xdd\x4b\xaa\x99\x3c\x3c\x90\xe3\x3c\x44\xdf\xf5\x6c\x3e\x1d\x07\x71\x6f\xb2\x75\x2b\xa0\x6a\xd8\xc4\xfd\x47\x72\x11\x1f\xe3\x58\x78\xa6\x35\x16\x4b\xd4\x05\x44\x6e\x70\x76\xa0\x29\x4b\x8d\x16\x9e\xa9\x35\x1a\x59\x94\xca\xe7\xaf\x9b\xb3\x45\xbb\xca\xd6\xd9\xfa\x5b\x87\xdc\x69\x9e\x34\xce\x3c\x16\x2f\x8d\x39\x6e\x9c\x48\xec\x67\xb2\x16\x59\x04\xe4\xd6\x28\xbc\xda\x4a\x1b\x70\xf9\xa6\xb6\x3e\x0d\xcc\xf9\xa6\x8c\x66\xe7\xa4\x27\x93\x97\xe5\xa1\x6d\x05\x2c\x3f\x18\x6d\x18\x55\xfa\x4b\xda\x71\x01\xdf\x81\x50\x1a\x53\xc1\x2f\x1c\xd6\xd8\x0e\xac\xef\xe0\xa1\x42\xd8\x92\xb5\xf4\xd5\xb8\xb2\xf3\x0d\x20\x19\x81\xf1\x9f\xc6\x30\x6a\x88\x04\x91\x4d\x59\x22\x43\x45\x21\x42\xa3\xb1\x95\xba\x86\x2d\x53\x3d\x02\x3b\x8d\x9f\xc9\x0c\xd3\x41\xc1\x8d\xc5\x20\x30\xaa\x99\x2c\x31\xaa\xbc\x19\xa7\x3a\x81\x62\xcd\x66\x06\x25\x75\x67\x1e\x25\x51\xe9\x42\xeb\x8e\xca\x6b\xdc\x65\x88\xb0\x0f\x33\x47\x9f\xad\x1d\xe3\x26\x9e\x16\x33\x23\x20\x5d\xf4\x01\x68\x28\xdb\x49\xea\x3d\xfd\xdb\xfe\x72\xc4\xdb\xe7\xc3\x11\xb8\xf7\x58\xc0\xf5\x81\x7b\xc4\xfb\xb3\x2c\xa6\xe9\xfe\x96\x04\xde\x14\x6e\x92\xe3\xdf\x52\xee\x85\x57\xca\x5c\x32\x9f\xf9\x23\xa3\x8c\x78\x96\xd4\xa4\x76\x66\x93\x1a\x93\xe2\x52\xd9\xdf\x4d\x60\x53\xe2\x7a\x51\x58\xb3\x25\x4c\xaa\xec\xa5\xf6\x5d\x54\xdf\x6c\xa8\x49\x29\xbe\x2a\xd4\x99\x44\x67\xe3\x4c\xea\xf5\xa5\x38\x43\x1d\xcf\xe2\x8f\x25\x3f\x0d\xfd\xee\xf0\x68\x09\x45\x7e\xa0\x2c\x3
b\x8c\x18\x12\x35\x35\xa9\x90\x2b\x72\x0a\x7d\x3c\x7c\xd8\x9a\xb2\x39\x3e\x44\x53\x67\xa3\x90\x4e\x8b\x48\x16\x8f\x9b\x27\xbc\x7b\x44\x58\xa6\x87\xa0\x91\x16\x94\x0c\xb8\xcc\xe0\xa1\x32\x01\xbe\x1a\x6b\xe1\x78\x02\x01\x5b\xe4\x7d\xac\x8c\x2b\x33\xb8\x25\x8d\xe9\xcd\xec\xc9\xa1\x8b\x10\x2a\x6a\xac\x3e\x01\x6e\x10\x82\xaa\x50\x37\x16\x35\x90\x03\x69\x2d\xa4\x37\x66\xc8\x8e\x2e\xcf\x59\x9c\x06\x97\x00\xf2\x69\x8f\xb8\x80\x9b\x47\x13\xe2\xd3\x5d\xa4\x83\xf7\x83\xef\x7e\x69\x0d\x6b\xa7\x50\x80\x35\xae\x79\x5c\xfc\x17\x00\x00\xff\xff\x08\xae\xd6\x48\x74\x0e\x00\x00") func testE2eTestingManifestsStorageCsiGcePdNode_dsYamlBytes() ([]byte, error) { return bindataRead( @@ -2497,7 +2540,7 @@ func testE2eTestingManifestsStorageCsiGcePdNode_dsYaml() (*asset, error) { return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathAttacherYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x53\xc1\x6e\xdb\x3a\x10\xbc\xeb\x2b\x16\x79\xd7\x47\xbb\x49\x1b\xa0\x10\x90\x43\x90\xe4\x10\x34\x4d\x0b\xb8\xe8\x7d\x4d\xad\xa5\x85\x29\x92\x25\x97\x8a\xf5\xf7\x05\x65\x37\xa6\xec\xb6\x41\x05\xf8\xb2\x9c\x19\xce\x0e\xc7\x5b\xb6\x4d\x0d\x2b\x0a\x03\x6b\xaa\xd0\xf3\x77\x0a\x91\x9d\xad\x61\xb8\xac\x7a\x12\x6c\x50\xb0\xae\x00\x2c\xf6\x54\x83\x8e\xac\x3a\x17\xc5\xa3\x74\x0a\x45\x50\x77\x14\x2a\x00\x83\x6b\x32\x31\xe3\x00\xd0\xfb\x3f\x01\xa3\x27\x9d\x41\x91\x0c\x69\x71\xe1\x4d\x02\x80\x77\x41\x0e\xc2\xea\x60\xa2\x49\x7d\x3f\x4e\x93\xfd\x71\x0d\x97\x57\xef\x3f\x5c\x57\x95\x52\xaa\x3a\x2c\x24\x28\xb4\x49\x66\x45\x32\x5b\x0a\xbd\x8f\xcb\x7f\xd8\xec\x68\x78\x0a\xe8\x79\x82\x5e\xfc\x16\x7b\x51\x01\x04\xf2\x86\x35\xc6\x1a\x2e\xcf\xb6\xec\x51\x74\xf7\x54\xe4\xf4\xc6\xe2\x42\xbd\x37\x28\x74\x60\x17\x86\xf3\x67\x66\x42\x6f\x48\x01\xfc\xda\x63\x82\x6e\x36\x6c\x59\xc6\x23\xd9\xbb\xe6\xf6\x6c\x98\xb7\xf9\x91\x38\x50\x73\x9f\x02\xdb\x76\xa5\x3b\x6a\x92\x61\xdb\x3e\xb6\xd6\xbd\x8e\x1f\x76\xa4\x93\xe4\x70\x0b\xa6\xda\xfb\x5b\xcd\x02\x38\x7e\x53\x14\x0f\x3b\x1f\x28\xe6\x67\x89\xa7\xe7\x0a\x
b6\x34\x4e\x8f\x75\x72\x00\xe0\x3c\x05\xcc\x92\xf0\x68\xcf\x0e\x07\x34\x89\xce\xd4\xb2\x5e\x99\x8c\x37\xa9\xe5\x39\x59\x9c\x77\xc6\xb5\xe3\xa7\x7c\xed\x36\xad\x29\x58\x12\x8a\x0b\x76\xcb\xcc\xca\x0d\x39\xe0\x0f\x45\xb8\xd5\xda\x25\x2b\xcf\xaf\xd5\x99\xa5\x0d\xa0\x9d\x15\x64\x4b\xa1\x70\xa3\x8a\xa6\x9d\xc0\xf3\xc7\x3d\xb6\x54\xc3\xf6\x63\x5c\xb4\x3a\xe4\xab\x23\xb7\x2a\x8a\x0b\xd8\xd2\xb2\x24\xd5\xc3\xd5\xe2\x6a\xf1\xae\xe0\x62\x68\x4f\xd6\x56\xa0\xd4\x70\x73\x7d\x36\x9b\x74\x9a\x26\x27\x7f\x93\x45\xf3\x6f\x11\x9d\xde\x16\xc8\x48\x3a\x05\x96\xf1\xce\x59\xa1\x9d\xcc\x85\xff\x83\x6f\x1d\x47\xe0\x08\x96\x34\xc5\x88\x61\x04\x67\xcd\x08\x1b\x17\x20\x8e\x51\xa8\x8f\xf0\xc2\xd2\xc1\xea\xe1\x89\x6d\xda\xfd\x0f\x2f\x1d\x05\x3a\x11\xb1\xce\x2a\x1f\x78\x60\x43\x2d\x35\x10\xb9\x21\x8d\xa1\xc8\x0d\x34\x5a\xeb\x04\x50\xe7\x5b\x20\x59\xde\x41\xe3\x7a\x64\x0b\xd9\x2e\xc9\x89\xa0\x0e\x84\x42\x0d\xac\x47\x28\x74\xef\x56\x8f\xd0\x04\x1e\xa8\x90\x5e\xcc\x98\x47\x70\x0d\x12\x52\xe9\x73\x70\x26\xf5\xf4\x39\x3f\x74\x9c\xb7\xbb\xcf\xb3\xaf\x28\x5d\x0d\x39\xc0\x99\xe0\xfe\x91\xf7\x1e\x55\xc3\xa1\xaa\x4a\xb5\x59\x1f\x72\xb7\x26\x95\xb9\xa3\xbd\xee\x80\x61\x69\x78\xbd\xcc\x75\x34\x24\xcb\x7d\x6d\xe3\xb2\xac\xf2\xbc\xc4\xa3\xa7\x1a\xee\x39\x4c\xff\xb9\xf1\x4b\xb8\x9b\x22\xa9\xfe\x62\xed\x67\x00\x00\x00\xff\xff\x6b\x30\xa8\x26\xfe\x05\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathAttacherYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x93\x4f\x6f\xdb\x38\x10\xc5\xef\xfa\x14\x83\xec\x75\x65\x6d\xb0\x58\x60\x21\x20\x87\x20\xc9\x21\x68\x9a\x16\x70\xd1\xfb\x98\x1a\x4b\x03\x53\x24\x3b\x1c\x2a\xd6\xb7\x2f\x28\xa7\x09\x65\xf7\x0f\x01\x5d\x86\x33\x3f\x3e\x3e\x3e\x1d\xd8\x75\x2d\x6c\x15\x95\xf6\xc9\x6e\x49\x2b\x0c\xfc\x95\x24\xb2\x77\x2d\x60\x08\xb1\x99\xae\xab\x91\x14\x3b\x54\x6c\x2b\x00\x87\x23\xb5\x60\x22\xd7\x83\x8f\x1a\x50\x87\x1a\x55\xd1\x0c\x24\x55\x0c\x64\x72\x4f\x24\x99\xd8\xd0\xf3\xd2\x7a\xf5\xd3\xde\xab\x0a\x40\x28\x58\x36\x18\x5b\xb8\x5e\x86\x2c\x19\xf5\x92\x01\x00\x23\xaa\x19\x9e\x70\x47\x36\x9e\x0a\x90\xd5\xfc\xea\x60\x00\xa5\x31\x58\x54\x7a\x9d\x2e\x04\xe7\x65\x57\xa0\x3f\xa0\x00\x7e\xdc\x63\x69\xdd\xef\xd9\xb1\xce\xef\xc3\xc1\x77\xb7\x17\xc5\x7c\x9b\x6f\x89\x85\xba\xfb\x24\xec\xfa\xad\x19\xa8\x4b\x96\x5d\xff\xd8\x3b\xff\x56\x7e\x38\x92\x49\x9a\xcd\x2d\x26\xeb\x93\xbe\xed\xca\x80\xf7\xb5\x58\xf1\x70\x0c\x42\x31\x3f\x4b\x3c\xdf\xaf\xe1\x40\xf3\xf2\x58\x67\x1b\x00\x3e\x90\x60\x46\xc2\xa3\xbb\xd8\x9c\xd0\x26\xba\xa0\x65\x5e\xe9\x4c\xb0\xa9\xe7\xf5\xb0\xfa\xe0\xad\xef\xe7\x0f\xf9\xd8\x43\xda\x91\x38\x52\x8a\x1b\xf6\x4d\x9e\xca\x09\x79\xed\x7f\x0d\xc2\xad\x31\x3e\x39\x7d\x7e\x8b\xce\xca\x6d\x00\xe3\x9d\x22\x3b\x92\x42\x4d\x5d\x24\xed\xac\x3d\x2f\x1e\xb1\xa7\x16\x0e\xff\xc7\x4d\x6f\x24\x1f\x1d\xb9\xaf\xa3\x7a\xc1\x9e\x9a\x72\xa8\x9d\xfe\xdd\x5c\x6f\xfe\x29\x66\x51\xfa\xb3\x6b\xd7\x50\xd7\xd3\xcd\x7f\x17\xb5\x85\xd3\x75\xd9\xf9\x9b\x0c\xcd\xdf\x26\x7a\x73\x28\x3a\x23\x99\x24\xac\xf3\x9d\x77\x4a\x47\x5d\x83\xff\x82\x2f\x03\x47\xe0\x08\x8e\x0c\xc5\x88\x32\x83\x77\x76\x86\xbd\x17\x88\x73\x54\x1a\x23\xbc\xb0\x0e\xb0\x7d\x78\x62\x97\x8e\x7f\xc3\xcb\x40\x42\x67\x10\xe7\x5d\x1d\x84\x27\xb6\xd4\x53\x07\x91\x3b\x32\x28\x85\x6f\x60\xd0\x39\xaf\x80\x26\x9f\x02\xc9\xf1\x11\x3a\x3f\x22\x3b\xc8\x72\x49\xcf\x80\x46\x08\x95\x3a\xd8\xcd\x50\x70\xef\xb6\x8f\xd0\x09\x4f\x54\xa0\x37\xab\xc9\xf7\xe6\x16\x54\x52\xa9\x73\xf2\x36\x8d\xf4\x31\x3f\x74\x5c\xa7\x7b\xcc\xb5
\xcf\xa8\x43\x0b\xd9\xc0\x15\xf0\xf4\xc8\x27\x8d\x75\xc7\x52\x55\x25\x6d\x95\x87\x9c\xad\x85\xb2\x56\x74\xe2\x4e\x28\x8d\xe5\x5d\x93\xe3\x68\x49\x9b\x53\x6c\x63\x53\x46\x79\x1d\xe2\x39\x50\x0b\xf7\x2c\xcb\x3f\x37\x7f\x92\xbb\xc5\x92\xea\x37\xd2\xbe\x07\x00\x00\xff\xff\x6a\xbe\xfd\x6d\x2d\x05\x00\x00") func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathAttacherYamlBytes() ([]byte, error) { return bindataRead( @@ -2537,7 +2580,7 @@ func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathDriverinfoYaml( return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x58\x6d\x6f\xdb\x36\x10\xfe\xee\x5f\x71\x48\x0a\xb4\x03\x2a\x2b\xe9\x1b\x52\x01\xfe\xb0\x26\xd9\x56\xac\x4d\x82\x25\xeb\x3e\x06\xb4\x78\xb6\x0e\xa1\x48\x81\x3c\x29\xf1\x7e\xfd\x70\x94\xed\x48\xb6\xe2\xb8\xc5\x06\xcc\x40\x81\x96\xbc\x37\xde\xdd\xf3\xdc\xa9\x87\x70\x8d\xbe\xa1\x1c\x41\xe3\x8c\x2c\x6a\x28\xd0\xe3\x6b\xa8\x4c\x1d\x20\xb4\x57\x17\xaa\x44\x98\xa2\x71\xf7\x40\x16\xae\x59\x31\xce\x6a\x73\x8d\xfc\x7a\x74\x08\xca\x23\x58\x44\x8d\x1a\x9c\x35\x0b\x98\x62\xae\xea\x80\xe0\x66\x90\x3b\xab\x89\xc9\x59\xc0\x87\xca\xa8\x68\x9d\xec\xe8\x10\x0a\xe6\x2a\x64\x69\x3a\x27\x2e\xea\xe9\x38\x77\x65\x7a\x57\x4f\xd1\x5b\x64\x0c\xdd\xbf\x52\x08\x35\x86\xf4\xc3\xc7\x0f\x47\x27\xa3\xd1\x1d\x59\x9d\xad\xe2\x1d\xa9\x8a\xbe\xa1\x0f\xe4\x6c\x06\xcd\xf1\xa8\x44\x56\x5a\xb1\xca\x46\x00\x56\x95\x98\x41\x1e\x28\x29\x5c\xe0\x4a\x71\x51\x99\x7a\x4e\x76\x04\x60\xd4\x14\x4d\x10\x21\x00\x55\x55\x83\x52\xa1\xc2\x5c\x24\x02\x1a\xcc\xd9\xf9\xdd\xd2\x00\x95\xf3\xbc\x34\x99\x2c\x7d\xeb\xba\x2c\x17\xf1\xa4\xbd\xce\xe0\xf8\xcd\xdb\x77\xef\x47\x49\x92\xac\x9e\xf1\x98\xc6\xde\x53\x54\x55\x85\x74\xdf\xf7\x3c\x46\xba\x2e\x54\x06\x07\xdb\x82\x07\x23\x80\x43\xb8\xb4\x08\x1e\x2b\x43\xb9\x8a\xb5\xca\xe2\xe9\x6f\x2e\x30\x88\x28\x68\x4f\x0d\xfa\xb6\x8c\xf7\xce\xdf\x05\xb8\x2f\xd0\x02\x36\xe8\x17\x5c\x90\x9d\x83\xaf\x6d\x88\x4a\xce\x82\x82\x40\x76
\x6e\x10\xac\xd3\x38\x86\xbf\x10\x54\x5e\x10\x36\x08\x5c\x28\x86\xe9\x02\x02\x2b\xcf\xa2\x46\x0c\xce\xe6\x08\xca\x6a\xe0\x02\x6d\x34\x91\xbb\xc4\xb8\x5c\x31\x82\x32\x06\x1c\x17\xe8\xa1\x72\x3a\x40\x43\x0a\xc8\x32\xfa\xa4\x72\x1a\xd4\x6c\x46\x96\x58\x92\xb9\x8c\x3d\x64\x70\xbc\x55\x9d\x52\x71\x5e\x7c\xe9\x14\x77\x57\xc1\x18\xcb\xca\x28\xc6\xa5\x6a\x27\xd3\xf2\x33\x3d\x2b\xbb\xec\x00\xac\x0a\x20\xbf\xdc\x59\x96\x26\xf7\x1d\xdd\x55\x3f\x48\x8e\x92\x36\xbf\x89\xc7\x39\x05\xf6\xca\xaf\xa5\x00\xa8\x54\x73\xcc\xe0\xee\x24\x8c\xe7\xb9\x1f\x93\x4b\x03\xcd\x93\xc0\xce\xab\x39\xa6\xe2\x7c\xd0\x42\xd6\x1c\x8f\xdf\x8e\x8f\x3a\x86\x94\x9f\x77\xdc\xb7\x21\x24\x49\x33\x79\xbf\x75\x26\x46\x95\xd6\x1e\x43\x98\x88\x07\xf9\x33\x0e\x2e\xbf\xdb\x92\x14\x40\x1a\xe4\xb5\x5b\x81\x74\x22\x89\x98\xa4\x8d\xf2\xa9\xa1\x69\xba\x14\x49\xdb\xd4\x84\xb4\x9b\xae\x21\xc3\x01\xf3\xda\x13\x2f\x4e\x9d\x65\x7c\xe0\x7e\xc4\x87\x70\x53\x50\x00\x0a\x60\x31\xc7\x10\x94\x5f\xb4\x4d\x39\x73\x1e\xc2\x22\x30\x96\x01\xee\x89\x0b\xb8\x3e\xff\x42\xb6\x7e\x78\x2d\x8d\xea\x71\xc3\x88\x95\x28\x3d\x35\x64\x70\x8e\x1a\x02\x69\xcc\x95\xef\x94\x09\x72\x65\xad\x63\x50\xb9\x78\x81\xda\xd2\x03\x68\x57\x2a\xb2\x20\xe1\x22\x6f\x18\xcc\x3d\x2a\x46\x2d\xad\xdd\xb1\x7b\x7a\xfd\x79\x85\x9c\xb5\xe9\x71\x4f\xf3\x51\x38\x03\xf6\x75\x37\x4e\xb4\xcd\x66\xb5\xda\x86\xf9\xfd\xcf\x4f\xe7\xb7\x17\x97\x67\xe7\xb7\x17\x3f\x7f\x3d\xef\x89\x00\x34\xca\xd4\xf8\x8b\x77\x65\xb6\x71\x01\x30\x23\x34\xfa\x0f\x9c\x6d\xdf\x48\x23\xf7\x18\x73\x5b\x20\x2a\x5f\x29\x2e\xb2\xd8\xd8\x63\xe9\x39\xe1\x94\x8e\x68\xe3\x4c\x5d\xe2\x57\x57\x5b\xee\xf5\x59\x02\xa5\x9c\xb5\xca\x52\xf1\x9e\xf9\xf6\x4d\x6d\x52\x13\x4d\xfe\x49\xc5\x6e\x8b\x0d\x58\xe8\x75\xe0\x2e\x3b\xd2\x7f\x82\xe9\x0d\xa1\x2e\x95\xae\xaf\xb7\xc0\xba\x6a\xdc\xbd\xf1\xd9\x27\x06\x01\xe5\xbb\x67\x41\x79\x90\x2c\xd1\x2c\x3e\x27\x2b\x0b\x63\xc1\x8a\x78\x21\x77\xb0\xad\xd0\x4c\xde\x0f\x9c\xa2\xd5\x95\x23\xcb\x93\x17\xaf\x4e\xaf\x3f\xdf\x9e\x5f\x9c\x5d\x5d\x7e\xbe\xb8\xf9\x69\x40\x54\x0a\x4a\x7a\xf2\xe2\x55
\xbf\xbd\x36\x44\x05\x7f\xd8\x22\x4e\xd3\x6c\x86\x1e\x85\xbc\xd9\xc1\xe0\xbc\x4e\x96\xdc\xb1\x22\x27\x79\x4a\x64\x87\x54\x63\x65\xdc\x62\xc3\x74\x02\xf7\x08\x85\x6a\x10\x14\x30\x06\x0e\xed\xb8\xc8\x0b\xcc\xef\x42\xe4\x49\x30\x54\x12\x87\xf1\x76\xf8\xa5\x7a\x68\xfb\x2f\x54\xe8\x45\x74\x72\x7c\x74\xb0\x0f\x9a\xba\x79\x19\xc2\x52\x16\xc1\x9f\xa5\xe9\x2e\x22\xfc\x7f\x03\x73\x27\xa3\xee\x60\xa0\xce\xf2\xb2\x7a\xe8\x9a\xc6\xae\xe2\xe6\xf2\xf1\xe4\xe3\xc9\x00\x86\x0a\x54\x86\x8b\xbf\x37\xfc\x38\x76\xb9\x33\x19\xdc\x9c\x5e\x75\x6e\x0c\x35\x68\x31\x84\x2b\xef\xa6\xd8\x0f\x6d\xa6\xc8\xd4\x1e\x6f\x0a\x8f\xa1\x70\x46\x67\xd0\x1f\x55\xb2\x2b\xfe\x8a\xbc\x99\xb6\xaa\xc5\xf9\x50\x10\xab\x8d\x6b\xe8\x4e\x56\x09\x52\xe6\x0c\x8d\x5a\x5c\xa3\x2c\xa8\xb2\x4d\x1c\xf5\x64\x98\x4a\x74\x35\xaf\xaf\xdf\xf6\x9f\x88\x9e\x9c\x5e\x5f\xbe\xd9\x83\x1b\x9f\x63\xc7\x9d\xfc\xb8\xa9\xbc\x35\x71\x9d\x0e\x1b\xd6\x5a\x71\xef\x2a\x35\x8f\x4c\x99\xc1\x27\xd2\xe4\x31\x97\x7f\x28\x33\xe8\x3b\xea\x44\x1a\xf9\x5e\xff\xed\xc4\xff\x17\x42\x58\x5a\x7a\xc6\xff\x93\xcc\x3e\xc8\xed\x4f\xdb\xd1\xd8\x0c\xaa\x6b\x6c\x7a\x9a\x2b\xdc\xaf\x5a\x38\xa9\xa4\x87\xff\xcb\x89\xb8\x7b\xd6\xac\xe2\x88\x61\xc8\xa8\x39\xde\x39\x6a\xf6\xdd\xf4\x44\xae\x05\x4c\x22\xe8\x99\x44\xd0\x8f\xba\x2f\xec\x6d\xb5\x42\xf2\xf1\x65\x7d\x6c\xec\x6c\x91\xde\x52\xd8\x47\xdc\xa2\xc2\x0c\xce\x62\x7f\x38\xbf\xb8\xf4\xa7\x71\xd3\x1a\xed\x91\xae\xef\x0d\x65\x13\x2d\xfb\xba\x7e\x02\x1f\x3f\x96\x89\xdb\xe5\x1e\xb3\xd8\x15\xca\x56\x08\x4f\x2e\x3f\x3f\x16\xc4\x77\xf9\x1e\xc2\xe6\x53\x6e\x0f\xe1\xa5\x78\x7e\x29\x0b\x7c\xdc\xcb\xe1\xea\x1b\x08\x22\xe5\xa0\x92\x39\x17\x38\xfe\x6f\x41\xd4\x1f\x6f\xe8\xd6\xf2\x55\x09\x29\x97\x95\x88\x2b\x13\x1c\x54\x2e\x04\x9a\x1a\x84\xfb\x82\x8c\x7c\x5e\x8a\x45\xf9\x02\x30\x06\xe2\xfe\xde\x28\x32\x4a\x04\xd4\x8c\xe5\x2b\x32\x06\xfb\x38\xc6\xc0\x63\x5c\xdd\xc9\x59\x70\x3e\x7a\x05\x8f\x53\xe7\x78\x57\xba\xba\xdd\x1a\x09\x25\xfd\xa1\xc6\x19\xa4\xa3\x67\x2a\xb6\xc9\x4d\xcf\x55\x67\xc5\x59\xff\x04\x00\x00\xff\xff\xb0\xb8\x68\x07\xcd
\x11\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x61\x6f\xdb\x3c\x0e\xfe\x9e\x5f\x41\x34\x03\xde\x0e\xa8\xed\x24\x5b\x8b\xd6\x40\x3f\x74\x6d\x6e\x57\xdc\x96\x06\x4b\x6f\xfb\x58\x28\x16\x13\x0b\x95\x25\x43\xa2\xdd\xe6\x7e\xfd\x41\xb6\x93\xda\x89\xe3\xa6\xbd\x0d\xb8\xb7\xc0\x80\xc0\x22\x1f\x52\x22\xf9\x90\x5c\x1f\x66\x68\x72\x11\x21\x70\x5c\x08\x85\x1c\x62\x34\x78\x02\xa9\xcc\x2c\xd8\xf2\x68\xc2\x12\x84\x39\x4a\xfd\x04\x42\xc1\x8c\x18\xe1\x22\x93\x33\xa4\x93\x5e\x1f\x98\x41\x50\x88\x1c\x39\x68\x25\x57\x30\xc7\x88\x65\x16\x41\x2f\x20\xd2\x8a\x0b\x12\x5a\x01\x3e\xa7\x92\x15\xe8\x42\xf5\xfa\x10\x13\xa5\x36\x0c\x82\xa5\xa0\x38\x9b\xfb\x91\x4e\x82\xc7\x6c\x8e\x46\x21\xa1\xad\xff\x14\xd6\x66\x68\x83\xb3\x8b\xb3\xc1\x79\xaf\xf7\x28\x14\x0f\xd7\xfe\xf6\x58\x2a\x7e\xa2\xb1\x42\xab\x10\xf2\x61\x2f\x41\x62\x9c\x11\x0b\x7b\x00\x8a\x25\x18\x42\x64\x85\x17\x6b\x4b\x29\xa3\x38\x95\xd9\x52\xa8\x1e\x80\x64\x73\x94\xd6\x09\x01\xb0\x34\x6d\x95\xb2\x29\x46\x4e\xc2\xa2\xc4\x88\xb4\xe9\x96\x06\x48\xb5\xa1\x0a\xd2\xab\x6c\xf3\x2c\x49\x56\xc5\x97\xf2\x38\x84\xe1\xe8\xd3\xe7\xd3\x9e\xe7\x79\xeb\x6b\xbc\x3c\x63\xe3\x2a\x2c\x4d\x6d\x70\xe8\x7d\x5e\x3c\xdd\x04\x2a\x84\xa3\x5d\xc1\xa3\x1e\x40\x1f\xee\x14\x82\xc1\x54\x8a\x88\x15\xb1\x0a\x8b\xaf\xff\xd4\x96\xc0\x89\x02\x37\x22\x47\x53\x86\xf1\x49\x9b\x47\x0b\x4f\x31\x2a\xc0\x1c\xcd\x8a\x62\xa1\x96\x60\x32\x65\x0b\x25\xad\x80\x81\x15\x6a\x29\x11\x94\xe6\xe8\xc3\x2f\x04\x16\xc5\x02\x73\x04\x8a\x19\xc1\x7c\x05\x96\x98\x21\xa7\x26\x08\xb4\x8a\x10\x98\xe2\x40\x31\xaa\x02\x22\xd2\x9e\xd4\x11\x23\x04\x26\x25\x68\x8a\xd1\x40\xaa\xb9\x85\x5c\x30\x10\x8a\xd0\x78\xa9\xe6\xc0\x16\x0b\xa1\x04\xb9\xc7\xac\x7c\xb7\x21\x0c\x77\xa2\x93\x30\x8a\xe2\x6f\xb5\xe0\x76\x05\x8c\x30\x49\x25\x23\xac\x54\x6b\x2f\xed\xfe\x64\x03\xa5\x0b\x07\x60\x1d\x80\xe2\x77\x19\x84\xab\x28\xd2\x99\xa2\x52\x05\x9f\x09\x8d\x62\xd2\x8b\x91\x49\x8a\xbd\x44\x2b\x41\xda\x78\x91\x56\x64\xb4\x94\x
68\x2a\x65\xf7\xc1\x55\x88\xa9\x19\xf6\x6a\x81\xdf\x07\xc4\x96\xa8\x68\xa3\x01\x20\x12\xb6\xc4\x10\x1e\xcf\xad\xbf\x8c\x8c\x2f\x74\x60\xc5\xd2\xb3\xa4\x0d\x5b\x62\xf0\x2a\x52\x98\x0f\xfc\x91\x3f\xa8\x01\x32\xb3\xac\xb9\x54\xba\x75\xe4\x79\xf9\xe5\xe9\xd1\xee\x57\x87\xcf\x38\x37\x68\xed\xe5\x87\xe3\xab\x9b\x9b\x1f\xe3\xd9\xec\x63\x5d\x10\x55\xbe\x8d\x56\x5e\x72\x72\x77\x33\x7e\x98\x5c\x7d\x1f\x37\x4e\x01\x72\x26\x33\xfc\x87\xd1\x49\xb8\x75\x00\xb0\x10\x28\xf9\x0f\x5c\xec\x9e\x54\x67\x53\x46\x71\x58\xc4\xc8\x77\x49\xea\xca\xa3\xd5\x76\xe5\x68\x9b\xe5\x10\xdc\xa3\xb9\x7f\xbe\xd5\xd1\xe3\xf6\x4b\x4f\x33\x29\xa7\x5a\x8a\x68\x15\xc2\xd1\xed\x62\xa2\x69\x6a\xd0\xa2\xa2\xfa\x9d\x73\x2d\xb3\x04\xbf\xbb\xac\xd8\x79\xca\xd2\x01\x07\x8d\xe4\x71\x61\xb6\x7c\x48\x9c\x52\x79\x0d\xe7\xc3\x9b\x52\x63\x27\xc7\xe0\x7f\xca\x8f\x17\xb8\x3f\x9e\x24\x95\xa0\x44\xc6\xd1\x78\x45\x9d\x0b\xad\x0e\x4a\xa3\xbf\x6f\x28\x5d\x86\x7a\x25\x05\x7b\x06\x97\xc2\x92\x61\x6f\x8b\x5c\x2b\x42\x98\x8f\xfc\x81\x3f\x7c\x25\x58\x45\xac\x76\xbe\xd5\x23\xb5\xef\xed\x4a\x49\xd7\xb3\x25\xd2\xc6\xac\x8b\x97\xe7\xb8\xf2\x32\xc8\x99\x09\xa4\x98\x07\x95\x48\x50\xb2\xa7\x0d\xea\x8c\xda\x06\x6c\x31\xca\x8c\xa0\xd5\xb5\x56\x84\xcf\xd4\xf4\xb8\x0f\xf7\xb1\xb0\x20\x2c\x28\x8c\xd0\x5a\x66\x56\x65\xdf\x5a\x68\x03\x76\x65\x09\x13\x0b\x4f\x82\x62\x98\x8d\xbf\x09\x95\x3d\x9f\xb8\x5e\x66\x70\x0b\x44\x39\x2f\x8d\xc8\x85\xc4\x25\x72\xb0\x82\x63\xc4\x4c\x8d\x8c\x21\x62\x4a\x69\x02\x16\x39\x2b\x90\x29\xf1\x0c\x5c\x27\x4c\xa8\x2a\xd0\x5b\x80\x91\x41\x46\xc8\x5d\xf7\xab\xe1\x5e\xcf\x6e\xd7\xcd\x75\x03\xed\x37\x34\x5f\x84\x43\x20\x93\xe1\x21\xa9\xfe\xaf\x7f\x7f\x19\x3f\xfc\x66\xda\xdc\x1a\xaa\x76\x05\x0e\xe0\xd5\x7d\x35\xe2\xed\x2d\x01\xf7\xd7\x51\x3d\x4d\xc5\x7a\x8a\xb5\x20\x34\x32\xb0\x0b\xc7\xe5\x9f\x6b\xfb\x3b\xa5\xfa\xc2\xac\x9b\xe3\x9d\x62\x5d\x27\x6e\x4d\xb1\x0f\xbf\xae\x7e\x4c\x6e\x27\x5f\x43\x48\x98\xca\x98\x94\x2b\xe0\xfa\x49\x2d\x0d\x73\xc3\xf1\xc2\xe8\x04\x86\xfe\x99\x3f\x02\xd2\x30\xf4\x3f\xfb\x83\xcd\xac\x3c\xf4\x4f\xfd\x
e7\x62\x3a\x72\x02\xcf\x10\xb3\x1c\x1b\xc8\x0c\xe6\xd9\xb2\x9c\xab\x0a\x0d\x0b\xe3\xd1\x18\x08\x6d\x31\x5e\x91\x86\x05\x13\x12\x8e\x3b\xe7\x6a\xaf\x2a\xe0\x35\x43\xb8\x2b\x14\x25\x1a\xa4\x99\x94\xc1\x68\x38\xe8\x73\x61\xa3\xcc\xba\xe8\x3f\x98\xb3\xc1\xe9\xe9\xc5\xe8\xf3\xa7\xf3\x93\x86\x27\xef\x34\x51\x0d\xf2\xa3\xd3\xe1\x47\xff\x60\x4e\x6b\xce\x5b\x61\x5e\x3c\xda\xeb\x5d\xa7\x34\xee\xe2\x74\xb9\x46\xf0\x1d\xbf\x38\x2b\x42\xb7\xf4\x99\xf6\x36\x85\x8a\xa7\x5a\x28\xba\xfc\x70\x7c\x3d\xbb\x7d\x18\x4f\x6e\xa6\x77\xb7\x93\xfb\xb6\x46\xe5\x8a\x40\xf0\xcb\x0f\xc7\xcd\x92\x3c\x6c\xf0\xa9\x83\xb7\xb7\x2d\xc7\x3a\x61\x10\x74\x31\xf0\xff\x37\x23\x74\x52\x79\x07\xf5\xd5\x16\xab\xf5\x45\x37\xfc\x39\x2d\xb6\xaa\x8b\xf3\x8b\xf3\x96\xe2\x2d\x47\x97\xff\x6c\xd9\xd1\xa4\x23\x2d\x43\xb8\xbf\x9e\xd6\x4e\xa4\xc8\x51\xa1\xb5\x53\xa3\xe7\xd8\x74\xcd\x55\x55\x66\xf0\x3e\x36\x68\x63\x2d\x79\x08\xcd\x1e\xe9\x8a\xe1\x2b\xd2\xf6\xb3\xa5\x25\xc1\xb4\x39\xb1\xde\x06\xdb\xce\xdc\x9a\x23\x98\xbc\x41\xc9\x56\x33\x74\xcb\xb3\xdb\x74\x06\x0d\x19\x12\x09\xea\x8c\x36\xc7\x9f\x9a\x57\x44\x23\x34\xdf\x1c\x8e\x0e\x1c\x5c\xba\x68\xb9\x93\x98\xb7\x95\x77\x5a\xbd\xe6\xb6\x75\x0a\x32\x3a\x65\xcb\x82\xa2\x43\xf8\x22\xb8\x30\xe5\x84\xc7\x64\xab\xed\x42\xa7\xa8\xc5\xb7\xda\x2f\x47\x8d\xdf\xe0\x42\x85\xf4\x8a\xfd\xbd\x2d\xa5\xb5\xa9\xec\xc7\xe1\x98\xb7\xaa\x73\xcc\x1b\x9a\xeb\xba\x5f\xa7\xb0\x97\xba\x1c\xfe\x93\xad\xb8\x9b\xb0\xd7\x7e\x14\x6e\xb8\xc1\xb3\x7b\x4b\x38\x74\xc4\x74\x72\xd5\x32\xe2\xaa\xe7\xb2\x28\xfa\x5e\xfd\x86\x8d\xa5\xd9\x91\x7e\x71\xb3\x66\x6d\x74\xa6\x48\x63\x1a\x6d\x56\xdc\x2a\xc5\x10\x6e\x8a\xfc\xd0\x66\x75\x67\xae\x8b\x11\xaf\x77\xc0\x73\xbd\xd5\x95\xed\x6a\x39\xd4\xf4\x9e\xfa\x78\xdf\x4b\x3c\x54\x03\xd4\xaa\xcb\x95\x1d\x17\xf6\x4e\x5d\xef\x73\xe2\x4d\xb6\xdb\x6a\x73\x9f\xd9\x3e\xfc\xe5\x2c\xff\xe5\x36\x87\x62\x21\x80\xe9\x4f\x70\x15\xe9\x3e\xa4\xae\xcf\x59\x2a\xfe\x27\xb3\xd0\xf7\xb7\x74\x33\xeb\xc6\xad\x80\x92\xd4\x89\x33\x69\x35\xa4\xda\x5a\x31\x97\x08\x4f\xb1\x90\x08\x14\x3b\x44\xb7\x7a\x
48\x09\xc5\xe2\x90\x33\x21\x99\x13\x60\x0b\x42\x53\x39\xfb\xd2\xc6\xc0\x60\xb1\x33\x08\xad\x40\x9b\xc2\x2a\x18\x9c\x6b\x4d\x5d\xcf\x55\xcf\xd6\x82\x50\x82\x77\x25\x4e\x2b\x1d\xbd\x12\xb1\x6d\x6e\x7a\x2d\x3a\x6b\xce\xfa\x6f\x00\x00\x00\xff\xff\x66\x1f\x32\x1e\x69\x16\x00\x00") func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYamlBytes() ([]byte, error) { return bindataRead( @@ -2557,7 +2600,7 @@ func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYaml() (* return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x53\xc1\x6e\xdb\x3a\x10\xbc\xeb\x2b\x16\x79\xd7\x47\xfb\xf9\xb5\x29\x0a\x01\x3e\x04\x49\x0e\x41\xd3\xb4\x80\x83\xde\xd7\xd4\x5a\x5a\x98\x22\x59\x72\xa9\x58\x7f\x5f\x50\x76\x12\x49\x46\x93\x96\x80\x2e\xe4\xcc\x70\x76\x38\xda\xb3\xad\x4a\xd8\x50\xe8\x58\x53\x81\x9e\x7f\x50\x88\xec\x6c\x09\xdd\xaa\x68\x49\xb0\x42\xc1\xb2\x00\xb0\xd8\x52\x09\x3a\xb2\x6a\x5c\x14\x8f\xd2\x28\x1f\x5c\xc7\x19\x4c\x01\x0a\x00\x83\x5b\x32\x31\x63\x01\xd0\xfb\xb7\xc0\xd1\x93\xce\xc0\x48\x86\xb4\xb8\xf0\x47\x24\x00\xef\x82\x9c\x2e\x50\x27\x43\x55\x6a\xdb\x7e\xd8\x39\x1e\x97\xb0\xfa\xff\xc3\xc7\xcb\xa2\x50\x4a\x15\xa7\xe1\x04\x85\x76\xc9\x6c\x48\x26\x03\xa2\xf7\x71\xf9\x57\x53\x8e\x7c\x0f\x79\x3d\x0c\xe8\x8b\xdf\xc1\x2f\x0a\x80\x40\xde\xb0\xc6\x58\xc2\xea\x6c\xde\x16\x45\x37\xf7\xa3\xd4\xde\x89\xa0\x00\x10\x6a\xbd\x41\xa1\x93\xc0\xc8\x79\x5e\x66\xa2\xf5\xbe\x1a\xc0\xf3\x40\x03\x7a\xb7\x63\xcb\xd2\xbf\xf2\xbd\xab\xae\xce\x36\xf3\x4c\x3f\x13\x07\xaa\x6e\x52\x60\x5b\x6f\x74\x43\x55\x32\x6c\xeb\xbb\xda\xba\x97\xed\xdb\x03\xe9\x24\x39\xe8\x11\x53\x1d\x2d\x6e\x26\x31\xbc\xae\x21\x90\xdb\x83\x0f\x14\xb3\xc7\x38\x3f\x57\xb0\xa7\x7e\x78\xb8\xd9\x01\x80\xf3\x14\x30\x4b\xc2\x9d\x3d\x3b\xec\xd0\x24\x3a\x53\xcb\x7a\xe3\x70\xbc\x49\x35\x4f\xc9\xe2\xbc\x33\xae\xee\xbf\xe4\x6b\xf7\x69\x4b\xc1\x92\x50\x5c\xb0\x5b\x66\x56\x6e\xcb\x09\x7f\x6a\xc4\x95\xd6\x2e\x59\x79\x78\xa
9\xd1\x3c\x70\x00\xed\xac\x20\x5b\x0a\x23\x43\x6a\x54\xbc\x73\x46\x5e\xdc\x62\x4d\x25\xec\x3f\xc7\x45\xad\x43\x36\x10\xb9\x56\x51\x5c\xc0\x9a\x96\x33\x5e\xd9\xad\x16\x9f\x16\xff\x8d\xe8\x18\xea\xd9\xfc\x0a\x54\xb7\xbe\x9c\x6f\xa9\xac\x84\x55\x95\x5f\x60\x9d\x65\xf3\xb7\x88\x4e\xef\xcf\x90\x3b\x42\x49\x81\x54\x8d\x42\x71\xfd\x78\x4a\x6a\x2d\x21\xd1\x08\x1b\x49\xa7\xc0\xd2\x5f\x3b\x2b\x74\x90\xa9\x87\x7f\xe0\xb1\xe1\x08\x1c\xc1\x92\xa6\x18\x31\xf4\xe0\xac\xe9\x61\xe7\x02\xc4\x3e\x0a\xb5\x11\x9e\x58\x1a\xd8\xdc\xde\xb3\x4d\x87\x7f\xe1\xa9\xa1\x40\x33\x11\xeb\xac\xf2\x81\x3b\x36\x54\x53\x05\x91\x2b\xd2\x18\x46\x41\x83\x46\x6b\x9d\x00\xea\x7c\x0b\x24\xcb\x07\xa8\x5c\x8b\x6c\x21\x8f\x46\x32\x13\xd4\x81\x50\xa8\x82\x6d\x0f\x23\xdd\xeb\xcd\x1d\x54\x81\x3b\x1a\x49\x2f\x26\xcc\x57\x70\x09\xb3\x1c\x3a\x67\x52\x4b\x5f\x73\x39\xce\x1e\xa2\xcd\xbb\xdf\x51\x9a\x12\x72\xdc\xb3\x9a\x1e\x9b\x71\xf4\xa9\x2a\x7e\x2e\xc5\x51\x70\xd2\xa1\x5c\xc9\x41\x66\x6a\xea\x28\xdc\x61\x58\x1a\xde\x2e\x73\x8b\x0d\xc9\xf2\xd8\xf6\xb8\x1c\xff\x01\xd3\xee\xf7\x9e\x4a\xb8\xe1\x30\xfc\xaa\xfd\xb7\x70\x3d\xa4\x52\xbc\xe1\xec\x57\x00\x00\x00\xff\xff\x01\x46\xee\x3e\x4d\x06\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x53\xc1\x6e\xdb\x3a\x10\xbc\xfb\x2b\x16\x79\xd7\x27\x1b\x7e\xc0\x03\x0a\x01\x3e\x04\x49\x0e\x41\xd3\xb4\x80\x83\xde\xd7\xd4\x5a\x5a\x98\x22\xd9\xe5\x52\xb1\xfe\xbe\xa0\xe4\x24\x92\x8c\xa6\x04\x74\x59\xce\x0c\x87\xc3\xd1\x89\x5d\x55\xc2\x5e\x51\xe9\x98\xec\x9e\x74\x85\x81\x7f\x92\x44\xf6\xae\x04\x0c\x21\x6e\xba\xed\xaa\x25\xc5\x0a\x15\xcb\x15\x80\xc3\x96\x4a\x30\x91\x8b\xc6\x47\x0d\xa8\x4d\x11\xc4\x77\x9c\x19\x24\xab\x18\xc8\x64\x58\x24\xe9\xd8\xd0\xf3\x80\xbe\xf9\x13\xfc\x66\x05\x20\x14\x2c\x1b\x8c\x25\x6c\x07\x9e\x25\xa3\x5e\xb2\x06\x40\x8b\x6a\x9a\x27\x3c\x90\x8d\xe3\x00\xb2\xa7\x4f\x8e\x07\x50\x6a\x83\x45\xa5\x8b\xc0\xc4\x79\x5e\x76\xa6\xf5\x77\x35\x80\xb7\x0b\x0d\xe8\xe3\x91\x1d\x6b\xff\xc1\x0f\xbe\xba\xbd\x1a\xe6\x3b\xfd\x4a\x2c\x54\xdd\x27\x61\x57\xef\x4d\x43\x55\xb2\xec\xea\xc7\xda\xf9\xf7\xf1\xc3\x99\x4c\xd2\x1c\xf4\x84\x59\x8c\x16\xf7\xb3\x18\x3e\xd6\x10\xc8\xc3\x39\x08\xc5\xec\x31\x2e\xf7\x0b\x38\x51\x3f\x3c\xdc\x62\x03\xc0\x07\x12\xcc\x92\xf0\xe8\xae\x36\x3b\xb4\x89\xae\xd4\xb2\xde\x34\x9c\x60\x53\xcd\x73\xb2\xfa\xe0\xad\xaf\xfb\xaf\xf9\xd8\x53\x3a\x90\x38\x52\x8a\x6b\xf6\x9b\xcc\xca\x6d\xb9\xe0\x2f\x8d\xb8\x35\xc6\x27\xa7\xcf\xef\x35\x5a\x06\x0e\x60\xbc\x53\x64\x47\x32\x31\x54\x4c\x8a\x77\xcd\xc8\x8b\x5b\xac\xa9\x84\xd3\x97\xb8\xae\x8d\x64\x03\x91\xeb\x22\xaa\x17\xac\x69\xb3\xe0\x95\xdd\x7f\xeb\xed\x7a\x3b\xa1\xa3\xd4\x8b\xfb\x17\x50\x74\xbb\xff\x97\xa3\x22\x2b\x61\x55\xe5\x17\xd8\x65\xd9\xfc\xad\xa3\x37\xa7\x2b\xe4\x91\x50\x93\x50\x51\xa3\x52\xdc\xbd\x5c\x92\xda\xa9\x24\x9a\x60\x23\x99\x24\xac\xfd\x9d\x77\x4a\x67\x9d\x7b\xf8\x07\x5e\x1a\x8e\xc0\x11\x1c\x19\x8a\x11\xa5\x07\xef\x6c\x0f\x47\x2f\x10\xfb\xa8\xd4\x46\x78\x65\x6d\x60\xff\xf0\xc4\x2e\x9d\xff\x85\xd7\x86\x84\x16\x22\xce\xbb\x22\x08\x77\x6c\xa9\xa6\x0a\x22\x57\x64\x50\x26\x41\x83\x41\xe7\xbc\x02\x9a\x7c\x0a\x24\xc7\x67\xa8\x7c\x8b\xec\x20\x5f\x8d\x74\x21\x68\x84\x50\xa9\x82\x43\x0f\x13\xdd\xbb\xfd\x23\x54\xc2\x1d\x4d\xa4\xd7\x33\xe6\x07\xb8\x84
\x45\x0e\x9d\xb7\xa9\xa5\x6f\xb9\x1c\x57\x0f\xd1\xe6\xe9\x0f\xd4\xa6\x84\x1c\xf7\xa2\xa6\x63\x33\x46\x9f\x45\xc5\x6f\xa5\x18\x05\x67\x1d\xca\x95\x1c\x64\xe6\xa6\x46\xe1\x0e\x65\x63\xf9\xb0\xc9\x2d\xb6\xa4\x9b\xb1\xed\x71\x33\xfd\x03\xe6\xdd\xef\x03\x95\x70\xcf\x32\xfc\xaa\xfd\x77\xb9\x1b\x52\x59\x7d\xe2\xec\x77\x00\x00\x00\xff\xff\xa2\x95\xe5\x99\x70\x05\x00\x00") func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYamlBytes() ([]byte, error) { return bindataRead( @@ -2577,7 +2620,7 @@ func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x53\xc1\x6e\xdb\x38\x10\xbd\xeb\x2b\x06\xd9\xeb\xca\x4e\x76\x37\xc0\x42\x40\x0e\x41\x92\x43\xd0\x34\x2d\xe0\xa2\x77\x9a\x1c\x4b\x03\x53\x24\x3b\x1c\x2a\x56\xbf\xbe\xa0\xec\xc4\x94\xdd\xa6\x25\xa0\xcb\xf0\xcd\xe3\x9b\x37\x4f\x5b\x72\xa6\x81\x15\xf2\x40\x1a\x2b\x15\xe8\x2b\x72\x24\xef\x1a\x18\xae\xaa\x1e\x45\x19\x25\xaa\xa9\x00\x9c\xea\xb1\x01\x1d\xa9\xee\x7c\x94\xa0\xa4\xab\x19\x23\x7d\x47\xae\x00\xac\x5a\xa3\x8d\x19\x06\xa0\x42\xf8\x05\x2e\x06\xd4\x19\x13\xd1\xa2\x16\xcf\xbf\xc3\x03\x04\xcf\x72\xa0\xad\x0f\x0a\x4c\xea\xfb\x71\xaa\xec\xaf\x1b\xb8\xfa\xe7\xdf\xff\xae\xab\xaa\xae\xeb\xea\x30\x8d\x28\xc1\x4d\xb2\x2b\x94\xd9\x44\x2a\x84\xb8\xfc\xf3\xb1\x8e\x72\x27\x73\x9e\x27\xe4\xc5\xcf\xa0\x17\x15\x00\x63\xb0\xa4\x55\x6c\xe0\xea\x6c\xc4\x5e\x89\xee\x9e\x0a\x8f\xde\x9f\x5a\xb0\x0f\x56\x09\x1e\x9a\x0b\xb5\xf9\xd8\x19\xcf\xfb\x4c\x00\xaf\x43\x4c\xc8\xcd\x86\x1c\xc9\x78\xec\x0d\xde\xdc\x9e\x15\xf3\x2c\xdf\x12\x31\x9a\xfb\xc4\xe4\xda\x95\xee\xd0\x24\x4b\xae\x7d\x6c\x9d\x7f\x2b\x3f\xec\x50\x27\xc9\xc6\x16\x9d\xf5\x5e\xde\x6a\x36\xfe\xf1\x4c\x46\x3c\xec\x02\x63\xcc\x2b\x89\xa7\xf7\x35\x6c\x71\x9c\x16\x75\x72\x01\xe0\x03\xb2\xca\x94\xf0\xe8\xce\x2e\x07\x65\x13\x9e\xb1\x65\xbe\xd2\x98\x60\x53\x4b\xf3\x66\xf1\xc1\x5b\xdf\x8e\x1f\xf2\xb3\xdb\xb4\x46
\x76\x28\x18\x17\xe4\x97\xb9\x2b\xa7\xe3\x80\x3f\xa4\xe0\x56\x6b\x9f\x9c\x3c\xbf\xc5\xa6\x34\x1b\x40\x7b\x27\x8a\x1c\x72\x21\xa6\x2e\x42\x36\x47\xe7\x43\xbd\x6a\xb1\x81\xed\xff\x71\xd1\x6a\xce\x0f\x47\x6a\xeb\x28\x9e\x55\x8b\xcb\xa2\xa7\x19\x2e\x17\xd7\x8b\xcb\xa2\x55\x71\x7b\x32\x73\x0d\xf5\x70\x73\x7d\x5a\xca\x24\xca\x98\x6c\xfa\x4d\x66\xcc\xdf\x22\x7a\xbd\x2d\x80\x11\x75\x62\x92\xf1\xce\x3b\xc1\x9d\xcc\x69\xff\x82\x2f\x1d\x45\xa0\x08\x0e\x35\xc6\xa8\x78\x04\xef\xec\x08\x1b\xcf\x10\xc7\x28\xd8\x47\x78\x21\xe9\x60\xf5\xf0\x44\x2e\xed\xfe\x86\x97\x0e\x19\x4f\x48\x9c\x77\x75\x60\x1a\xc8\x62\x8b\x06\x22\x19\xd4\x8a\x0b\xcf\x40\x2b\xe7\xbc\x80\xd2\xf9\x15\x48\x8e\x76\x60\x7c\xaf\xc8\x41\x96\x8b\x72\x42\xa8\x19\x95\xa0\x81\xf5\x08\x05\xef\xdd\xea\x11\x0c\xd3\x80\x05\xf5\x62\xd6\x79\x04\x37\x20\x9c\x4a\x9d\x83\xb7\xa9\xc7\x8f\x79\xc7\x67\xde\xf6\xb9\xfa\x59\x49\xd7\x40\xb6\xf0\x24\x6d\xfb\x25\xef\x75\xd6\x86\x5e\x77\xbc\x27\x9c\xc5\x21\x27\x6b\xa2\x99\x8b\xda\x13\x0f\x8a\x97\x96\xd6\xcb\x1c\x46\x8b\xb2\xdc\x87\x36\x2e\xcb\x20\xcf\x23\x3c\x06\x6c\xe0\x9e\x78\xfa\xe3\xc6\x4f\x7c\x37\xb9\x52\xbd\xa3\xec\x47\x00\x00\x00\xff\xff\xb7\x8c\xb0\xd9\xf5\x05\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x53\x4d\x6f\xdb\x3a\x10\xbc\xfb\x57\x2c\xf2\xae\x4f\xf6\xf3\xe1\x01\x85\x80\x1c\x82\x24\x87\xa0\x69\x5a\xc0\x45\xef\x6b\x6a\x2d\x2d\x4c\x91\xec\x72\xa9\x58\xfd\xf5\x05\xe5\x7c\x50\x72\x1b\x02\xba\x2c\x77\x86\xb3\xb3\xa3\x23\xbb\xa6\x86\x9d\xa2\xd2\x21\xd9\x1d\xe9\x0a\x03\xff\x20\x89\xec\x5d\x0d\x18\x42\xdc\x0c\xdb\x55\x4f\x8a\x0d\x2a\xd6\x2b\x00\x87\x3d\xd5\x60\x22\x57\x9d\x8f\x1a\x50\xbb\x4a\x28\xf2\x2f\x92\x55\x0c\x64\x72\x4b\x24\x19\xd8\xd0\xd3\xd4\x79\xf5\xa7\xd6\xab\x15\x80\x50\xb0\x6c\x30\xd6\xb0\x9d\x30\x96\x8c\x7a\xc9\x78\x80\x1e\xd5\x74\x8f\xb8\x27\x1b\xcf\x05\xc8\x5a\xfe\xf2\x2c\x80\x52\x1f\x2c\x2a\xbd\x80\x0b\xb5\xf9\xd8\x19\xcf\xc7\x4c\x00\xaf\x43\x4c\x9d\x87\x03\x3b\xd6\xf1\x1d\x1b\x7c\x73\x73\x51\xcc\xb3\xfc\x4c\x2c\xd4\xdc\x25\x61\xd7\xee\x4c\x47\x4d\xb2\xec\xda\x87\xd6\xf9\xb7\xf2\xfd\x89\x4c\xd2\x6c\x6c\x81\xac\xce\xf2\x76\xb3\xf1\xdf\xcf\x64\xc4\xfd\x29\x08\xc5\xbc\x92\xb8\xbc\xaf\xe0\x48\xe3\xb4\xa8\xc5\x05\x80\x0f\x24\x98\x29\xe1\xc1\x5d\x5c\x0e\x68\x13\x5d\xb0\x65\xbe\xd2\x98\x60\x53\xcb\x73\xb0\xfa\xe0\xad\x6f\xc7\xcf\xf9\xd9\x63\xda\x93\x38\x52\x8a\x6b\xf6\x9b\x8c\xca\xe9\x78\xe9\x7f\x49\xc1\x8d\x31\x3e\x39\x7d\x7a\x8b\x4d\x69\x36\x80\xf1\x4e\x91\x1d\x49\x21\xa6\x2a\x42\x36\xef\xce\x87\x7b\x6c\xa9\x86\xe3\xa7\xb8\x6e\x8d\xe4\x87\x23\xb7\x55\x54\x2f\xd8\xd2\xa6\xc0\xd4\xc3\x76\xbd\x5d\xff\x57\x40\x51\xda\xc5\xcc\x15\x54\xc3\xf5\xff\xcb\x52\x26\xc1\xa6\xc9\xa6\x5f\x67\xc6\xfc\xad\xa3\x37\xc7\xa2\x31\x92\x49\xc2\x3a\xde\x7a\xa7\x74\xd2\x39\xed\x3f\xf0\xbd\xe3\x08\x1c\xc1\x91\xa1\x18\x51\x46\xf0\xce\x8e\x70\xf0\x02\x71\x8c\x4a\x7d\x84\x67\xd6\x0e\x76\xf7\x8f\xec\xd2\xe9\x5f\x78\xee\x48\x68\x41\xe2\xbc\xab\x82\xf0\xc0\x96\x5a\x6a\x20\x72\x43\x06\xa5\xf0\x0c\x0c\x3a\xe7\x15\xd0\xe4\x57\x20\x39\x3e\x41\xe3\x7b\x64\x07\x59\x2e\xe9\x82\xd0\x08\xa1\x52\x03\xfb\x11\x0a\xde\xdb\xdd\x03\x34\xc2\x03\x15\xd4\xeb\x19\xf2\xbd\xb9\x06\x95\x54\xea\x1c\xbc\x4d\x3d\x7d\xc9\x3b\xbe\xf0\xb6\xcf\xd5\x6f\xa8\x5d\x0d
\xd9\xc2\x45\xda\xce\x4b\x3e\xeb\xac\x1a\x7e\xdd\xf1\x99\x70\x16\x87\x9c\xac\x89\x66\x2e\xea\x4c\x3c\xa0\x6c\x2c\xef\x37\x39\x8c\x96\x74\x73\x0e\x6d\xdc\x94\x41\x9e\x47\x78\x0c\x54\xc3\x1d\xcb\xf4\xc7\x8d\x5f\xe5\x76\x72\x65\xf5\x81\xb2\xdf\x01\x00\x00\xff\xff\xcf\x2b\x78\x14\x27\x05\x00\x00") func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYamlBytes() ([]byte, error) { return bindataRead( @@ -2597,7 +2640,7 @@ func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml() ( return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x53\x4d\x6f\xdb\x3a\x10\xbc\xeb\x57\x2c\xf2\xae\x8f\xf6\x4b\xf2\x02\x14\x02\x72\x08\x92\x1c\x82\xa6\x1f\x80\x8b\xde\xd7\xd4\x5a\x5a\x98\x22\x59\x72\xa9\x58\xff\xbe\xa0\xe4\xd4\x92\x8c\x26\x2d\x01\x5d\x96\x3b\xc3\xd9\xd9\xd1\x9e\x6d\x55\xc2\x86\x42\xc7\x9a\x0a\xf4\xfc\x9d\x42\x64\x67\x4b\xe8\x2e\x8b\x96\x04\x2b\x14\x2c\x0b\x00\x8b\x2d\x95\xa0\x23\xab\xc6\x45\xf1\x28\x8d\x8a\x16\x7d\x6c\x9c\x08\x85\x02\xc0\xe0\x96\x4c\xcc\xad\x00\xe8\xfd\x1b\xbd\xd1\x93\xce\x7d\x91\x0c\x69\x71\xe1\x4f\x30\x00\xde\x05\x39\xd2\xab\xa3\x9a\x2a\xb5\x6d\x3f\x54\xc6\xeb\x12\x2e\xaf\xae\xff\xbf\x29\x0a\xa5\x54\x71\x9c\x4c\x50\x68\x97\xcc\x86\x64\x36\x1d\x7a\x1f\xd7\x7f\x37\xe2\x49\xf6\x60\xd6\xe7\xa1\xfb\xe2\x77\xed\x17\x05\x40\x20\x6f\x58\x63\x2c\xe1\xf2\x6c\xdc\x16\x45\x37\xcf\x13\xcf\xde\x77\x40\xa8\xf5\x06\x85\x8e\x04\x13\xe5\xf9\x98\x19\xd7\xfb\x6c\x00\xaf\x03\x0d\xdd\xbb\x1d\x5b\x96\xfe\x84\xf7\xae\xba\x3b\x2b\xe6\x99\x7e\x24\x0e\x54\x3d\xa4\xc0\xb6\xde\xe8\x86\xaa\x64\xd8\xd6\x4f\xb5\x75\xbf\xca\x8f\x07\xd2\x49\xb2\xd1\x13\xa4\x1a\x25\x6e\x66\x36\x9c\xce\x60\xc8\xe3\xc1\x07\x8a\x79\x45\x71\x79\xaf\x60\x4f\xfd\xb0\xb8\xc5\x05\x80\xf3\x14\x30\x53\xc2\x93\x3d\xbb\xec\xd0\x24\x3a\x63\xcb\x7c\x53\x73\xbc\x49\x35\xcf\xc1\xe2\xbc\x33\xae\xee\x3f\xe6\x67\xf7\x69\x4b\xc1\x92\x50\x5c\xb1\x5b\x67\x54\x4e\xcb\xb1\xff\x98\x88\x3b\xad\x5d
\xb2\x32\xba\xbe\x34\x1b\x40\x3b\x2b\xc8\x96\xc2\x44\x8c\x9a\x84\xee\x1c\x91\x0f\xb7\x58\x53\x09\xfb\x0f\x71\x55\xeb\x90\x1f\x8f\x5c\xab\x28\x2e\x60\x4d\xeb\x05\xae\xec\xae\x57\xff\xad\xae\x26\x70\x0c\xf5\x62\x76\x05\xaa\xbb\xbd\x59\x96\x54\x66\xc2\xaa\xca\xee\xdf\x66\xda\xfc\xad\xa2\xd3\xfb\x49\x67\x24\x9d\x02\x4b\x7f\xef\xac\xd0\x41\xe6\xbc\xff\xc0\xb7\x86\x23\x70\x04\x4b\x9a\x62\xc4\xd0\x83\xb3\xa6\x87\x9d\x0b\x10\xfb\x28\xd4\x46\x78\x61\x69\x60\xf3\xf8\xcc\x36\x1d\xfe\x85\x97\x86\x02\x2d\x48\xac\xb3\xca\x07\xee\xd8\x50\x4d\x15\x44\xae\x48\x63\x98\x98\x07\x1a\xad\x75\x02\xa8\xf3\x2b\x90\x2c\x1f\xa0\x72\x2d\xb2\x85\x2c\x97\x64\x41\xa8\x03\xa1\x50\x05\xdb\x1e\x26\xbc\xf7\x9b\x27\xa8\x02\x77\x34\xa1\x5e\xcd\x90\xa7\xe6\x12\x24\xa4\xa9\xce\xce\x99\xd4\xd2\xa7\xbc\xec\x33\x73\xdb\x5c\xfd\x8a\xd2\x94\x90\x2d\x5c\xc4\x6e\xdc\xf6\xa8\x53\x55\xfc\xba\xe8\x91\x70\x96\x8b\x1c\xb1\x81\x66\x2e\x6a\x24\xee\x30\xac\x0d\x6f\xd7\x39\x95\x86\x64\x3d\xa6\x37\xae\xa7\x89\x9e\x67\xb9\xf7\x54\xc2\x03\x87\xe1\xd7\xeb\xbf\x84\xfb\xc1\x95\xe2\x0d\x65\x3f\x03\x00\x00\xff\xff\x5f\x82\xa8\x3a\x1a\x06\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x53\xcd\x6e\xdc\x3c\x0c\xbc\xef\x53\x10\xf9\xae\x9f\xd7\x09\xd0\x02\x85\x81\x1c\x82\x24\x87\xa0\xe9\x0f\xb0\x45\xef\x5c\x99\x6b\x13\x2b\x4b\xaa\x48\x39\xeb\xb7\x2f\x64\x27\x5d\xdb\x8b\xa6\x02\x7c\xa1\x38\xc3\xd1\x70\x7c\x64\x57\x57\xb0\x53\x54\x3a\x24\xbb\x23\xdd\x60\xe0\x9f\x14\x85\xbd\xab\x00\x43\x90\xb2\xbf\xd9\x74\xa4\x58\xa3\x62\xb5\x01\x70\xd8\x51\x05\x46\xb8\x68\xbd\x68\x40\x6d\x0b\x71\x18\xa4\xf5\xaa\x14\x37\x12\xc8\xe4\x36\xa1\xd8\xb3\xa1\xaf\x63\xf7\xd5\xdf\xda\xaf\x36\x00\x91\x82\x65\x83\x52\xc1\xcd\x88\xb3\x64\xd4\xc7\xcc\x01\xd0\xa1\x9a\xf6\x19\xf7\x64\x65\x2a\x40\xd6\xf4\xce\x78\x00\xa5\x2e\x58\x54\x7a\x25\x98\x29\xcf\xc7\x2e\xb8\xfe\xcd\x06\xf0\xf6\xa0\xb1\xfb\x70\x60\xc7\x3a\x9c\xf1\xc1\xd7\x77\x17\xc5\xfc\xa6\x5f\x89\x23\xd5\x0f\x29\xb2\x6b\x76\xa6\xa5\x3a\x59\x76\xcd\x53\xe3\xfc\x9f\xf2\xe3\x89\x4c\xd2\x6c\xf4\x0c\x59\x4c\x12\x77\x0b\x1b\xce\x67\x34\xe4\xf1\x14\x22\x49\x5e\x91\xac\xef\x0b\x38\xd2\x30\x2e\x6e\x75\x01\xe0\x03\x45\xcc\x94\xf0\xe4\x2e\x2e\x7b\xb4\x89\x2e\xd8\x32\xdf\xdc\x9c\x60\x53\xc3\x4b\xb0\xfa\xe0\xad\x6f\x86\xcf\x79\xec\x31\xed\x29\x3a\x52\x92\x2d\xfb\x32\xa3\x72\x5a\x5e\xfb\x5f\x13\x71\x67\x8c\x4f\x4e\x27\xd7\xd7\x66\x03\x18\xef\x14\xd9\x51\x9c\x89\x29\x66\xa1\xbb\x44\xe4\xc3\x1d\x36\x54\xc1\xf1\x93\x6c\x1b\x13\xf3\x70\xe1\xa6\x10\xf5\x11\x1b\x2a\x57\xb8\xaa\xff\xb0\xbd\xde\x5e\xcf\xe0\x18\x9b\xd5\xdb\x0b\x28\xfa\xdb\x8f\xeb\x52\x91\x99\xb0\xae\xb3\xfb\xb7\x99\x36\x7f\x5b\xf1\xe6\x38\xeb\x14\x32\x29\xb2\x0e\xf7\xde\x29\x9d\x74\xc9\xfb\x1f\xfc\x68\x59\x80\x05\x1c\x19\x12\xc1\x38\x80\x77\x76\x80\x83\x8f\x20\x83\x28\x75\x02\x2f\xac\x2d\xec\x1e\x9f\xd9\xa5\xd3\xff\xf0\xd2\x52\xa4\x15\x89\xf3\xae\x08\x91\x7b\xb6\xd4\x50\x0d\xc2\x35\x19\x8c\x33\xf3\xc0\xa0\x73\x5e\x01\x4d\x9e\x02\xc9\xf1\x09\x6a\xdf\x21\x3b\xc8\x72\x49\x57\x84\x26\x12\x2a\xd5\xb0\x1f\x60\xc6\x7b\xbf\x7b\x82\x3a\x72\x4f\x33\xea\xed\x02\x79\x6e\xae\x40\x63\x9a\xeb\xec\xbd\x4d\x1d\x7d\xc9\xcb\xbe\x30\xb7\xcb\xd5\xef\xa8
\x6d\x05\xd9\xc2\x55\xec\xa6\x6d\x4f\x3a\x8b\x9a\xdf\x16\x3d\x11\x2e\x72\x91\x23\x36\xd2\x2c\x45\x4d\xc4\x3d\xc6\xd2\xf2\xbe\xcc\xa9\xb4\xa4\xe5\x94\x5e\x29\xe7\x89\x5e\x66\x79\x08\x54\xc1\x03\xc7\xf1\xd7\x1b\xbe\xc5\xfb\xd1\x95\xcd\x3b\xca\x7e\x07\x00\x00\xff\xff\x0c\x11\x2e\x29\x40\x05\x00\x00") func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYamlBytes() ([]byte, error) { return bindataRead( @@ -2617,42 +2660,42 @@ func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x91\x4d\x4b\x03\x41\x0c\x86\xef\xf3\x2b\x02\x5e\x14\xdc\x8a\x9e\x64\x6f\x7e\x80\xd7\xa5\x15\xef\xe9\x4c\xba\x9b\x76\x3b\x19\x92\xcc\x42\xfb\xeb\x65\x29\xf6\x52\x14\xa1\xd7\x99\xe4\x09\xef\xfb\xdc\x40\x51\x9e\x78\xa4\xd4\x53\x82\x4e\x12\xac\x28\x56\x65\x3f\x40\x27\x23\xc7\xc3\x3d\x14\xa5\x89\xa5\xda\x78\x80\x44\x1b\xce\x94\x60\x5b\xcd\x61\x23\x0a\x7d\xa4\xee\x1d\x26\x46\xe8\x4e\x98\x9e\xd2\x27\x99\x77\xab\xee\x6d\xac\xe6\xa4\x4b\x19\xe9\x95\x73\xe2\xdc\xdf\xde\x85\x1d\xe7\xd4\xc2\xe5\x57\xc0\xc2\x5f\xa4\xc6\x92\x5b\xd0\x35\xc6\x05\x56\x1f\x44\xf9\x88\xce\x92\x17\xbb\x67\x5b\xb0\x3c\x4c\x8f\x61\x4f\x8e\x09\x1d\xdb\x00\x90\x71\x4f\x2d\x14\x2b\x4d\x34\x6e\x06\x31\x2f\xe8\x43\xa3\x32\x52\xb0\xba\xde\x52\x74\x9b\xe7\x1a\x38\xdd\x5d\x91\x4e\x1c\xe9\x25\x46\xa9\xd9\x03\xc0\x0f\x62\x5e\x47\x77\x8c\x03\xe9\xf9\xd9\x0a\x46\x6a\xe7\xcc\x58\x47\xff\x2f\xa6\xa8\x4c\x3c\xe7\xb8\x9a\x64\x19\x8b\x0d\xe2\x7e\x35\x49\xc9\xf8\xf8\x3b\x65\xee\x6b\x49\x9b\xb9\xa9\x0b\x3f\xe7\x96\xe9\x89\x1a\x27\xf3\xa6\x9c\x45\x37\xc5\x4a\x00\xc0\xc2\x1f\x2a\xb5\xfc\x21\x2e\x7c\x07\x00\x00\xff\xff\x37\xc6\x5c\x16\x67\x02\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x54\xcf\x6f\xeb\x36\x0c\xbe\xfb\xaf\x20\x5e\x0e\xbb\xd4\xf9\x81\xdd\x7c\x7b\x68\x8b\xa1\xd8\x5b\x57\x2c\xdd\x76\x66\x24\x26\x26\x22\x4b\x9a\x48\xa5\xf1\x7f\x3f\xc8\x49\x1a\x3b\x79\xa8\x4f\x86\xf8\xf1\xd3\xc7\x4f\x24\x67\xf0\xef\xf7\xbf\x5e\x5f\x5e\x7f\x6b\x40\x5b\x16\x60\x81\xe0\x5d\x0f\xdb\x90\x40\x49\x94\xfd\x0e\x62\x4e\x31\x08\xc9\x1c\x9e\x02\xf8\xa0\xc0\x5e\x14\x9d\x03\xf6\x80\x10\x53\xb0\xd9\x28\x07\x5f\xcd\xc0\xb8\x2c\x4a\x69\x5e\xcd\xaa\x19\xbc\x17\x42\x3a\x0e\xb9\xa0\x2d\x41\x1b\x44\x23\x6a\xfb\x8b\xc0\xdf\x9e\x8f\x60\x43\x87\xec\xc1\x08\xcf\x25\x98\x3d\xa0\x00\xc2\xfb\xe3\x1b\xc4\x90\x14\x34\x94\xa4\x6a\x06\x21\xab\xb0\x25\xf8\x08\xc9\xd9\x39\xbc\xb7\x04\x1d\xc6\x58\xa4\x6d\x53\xe8\x26\x5c\x85\x87\x86\xdc\xc2\xc3\x02\x36\xf8\xc2\xb1\xe9\x4b\x08\x75\xa4\x8c\x05\xb2\xd0\x36\xbb\x49\xb1\x1f\xac\x6d\x51\x54\x0b\x7a\xd6\x1e\x42\x02\x23\x66\x5e\x55\x18\xf9\x1f\x4a\xc2\xc1\x37\x70\x58\x55\x7b\xf6\xb6\x81\x35\xa5\x03\x1b\xaa\x3a\x52\xb4\xa8\xd8\x54\x00\x1e\x3b\x6a\x3e\x6b\xad\xe5\x0c\x91\x48\xa6\x84\xb5\x8f\xd4\xc0\x6b\xb0\xf4\x16\x92\x56\x00\x42\x8e\x8c\x86\x54\x82\x00\x18\x63\x33\xdc\x7f\x25\x28\xba\x2b\x18\x4c\x91\x02\xaa\x87\xdf\x06\x56\xcb\xe5\x72\x09\x33\xd8\xf2\x91\xec\xc9\x33\xf6\x83\x53\xc5\xec\x18\xec\x03\xd8\xde\x63\xc7\x06\x9d\xeb\x01\x9d\x2b\x4c\x17\xe8\xd9\xd5\xaa\xae\xeb\x4b\x31\x8a\x5a\xfc\x58\x93\x4e\xaa\xc5\x18\x65\x71\x58\xfd\xa4\xc8\x9f\xe8\xbc\x94\x79\x2e\xfb\x75\xc0\x7d\xbb\x07\x7e\xab\x00\x12\x45\xc7\x06\xa5\x81\xd5\x9d\x0f\x1d\xaa\x69\x7f\xe0\x86\x9c\x9c\x0e\xbe\xb2\x46\xa9\x8b\x0e\x95\xce\xa9\x23\x9d\xe5\x73\x13\x96\xaf\x78\x00\x2e\xf2\x07\xdc\x76\xcb\xa5\x07\xae\x99\x31\xd8\xef\x77\x87\xa5\x8e\xff\x32\x27\xb2\x4f\x39\xb1\xdf\xad\x4d\x4b\x36\x3b\xf6\xbb\x97\x9d\x0f\x9f\xc7\xcf\x47\x32\xb9\x8c\xc9\x38\xb3\x3e\x89\x5b\x4f\x4a\xbf\x7e\x83\x09\xcf\xc7\x98\x48\xca\x53\xc8\x6d\xbc\x86\x3d\xf5\xc3\x03\xdd\x04\x00\x42\xa4\x84\x85\x12\x5e\xfc\x5d\xf0\x80\x2e\xd3\x1d\x5b\xe1\x1b\xdb\x12\x5d\xde\xf1
\x34\x59\x43\x0c\x2e\xec\xfa\xdf\xcb\xb5\xfb\xbc\xa1\xe4\x49\x49\xe6\x1c\x16\x25\xab\x74\xc5\x19\x6f\x82\x57\x64\x4f\x69\x74\x4d\x7d\x6e\x9b\xab\xdd\xa7\x8f\x3b\xdc\x51\x03\xe8\x22\x7b\x5a\x0c\xd1\x66\x35\x5f\xce\x7f\x1d\x61\x30\xed\x6e\x04\xd7\xa0\x26\xd6\x8e\x45\xc9\x37\xc3\x2c\x3c\x6c\x43\xda\x3f\x24\xca\x42\x68\x6d\xba\x41\x67\xcf\xc7\xda\x04\xef\xc9\x68\xb3\x30\xc2\x8b\xcb\xe2\x19\x01\x85\x4c\x4e\xac\xfd\x63\xf0\x4a\x47\x9d\xde\x78\x5d\x1c\x9e\x0c\x89\x60\xea\xaf\xdb\x52\x7a\x51\xea\xe4\xb4\x40\xd6\xcf\x3f\xd8\xe7\xe3\x03\x7c\xb4\x94\xe8\x86\xc4\x07\x5f\xc7\xc4\x07\x76\xb4\x23\x0b\x65\x0a\x0d\xa6\x91\x63\x60\xd0\x97\x3d\x8b\xa6\xdc\x32\x08\x9f\xee\xb7\x1b\x42\x93\x68\x98\xeb\x4d\x0f\x23\xde\xc7\xf5\x0b\xd8\xc4\x07\x1a\x51\xcf\x27\x99\x57\x70\x03\x9a\xf2\x58\xe7\x21\xb8\xdc\xd1\x1f\x21\x7b\x95\x69\xc3\x76\xe5\xec\x0d\xb5\x6d\xa0\x18\x38\x21\xfc\x7c\xde\x3d\x69\x6d\xf9\xf2\x02\x27\xb2\x49\x23\x94\x6e\x19\x48\xa6\x82\x4e\xb4\x07\x4c\x0b\xc7\x9b\x45\x69\x30\x47\xba\x38\x35\xa2\x2c\xc6\xcd\x39\x6d\xcb\x61\xa7\x3e\x71\x1a\xa6\xa8\xff\x33\x3d\x0e\x8e\x54\x5f\x28\xfb\x3f\x00\x00\xff\xff\x10\x2f\xbb\x26\xf9\x06\x00\x00") -func testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYamlBytes() ([]byte, error) { +func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYamlBytes() ([]byte, error) { return bindataRead( - _testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml", + _testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml", ) } -func testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml() (*asset, error) { - bytes, err := testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYamlBytes() +func testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYaml() (*asset, error) { + bytes, err := 
testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\xb1\x8a\xc3\x30\x0c\x87\xf1\x5d\x4f\xa1\x17\x48\x8e\xdb\x0e\x8f\x77\xb7\x74\x28\x14\x0a\xdd\x85\xfd\x27\x11\xb1\xad\x60\xa9\x81\xbe\x7d\x29\x9d\xba\x7e\x1f\xfc\x64\xd7\x1b\x86\xab\xf5\xc4\x1e\x36\x64\xc1\xbc\xfd\xf8\xac\xf6\x75\x7c\xd3\xa6\xbd\x24\xbe\xbe\xfb\x5f\x15\x77\x6a\x08\x29\x12\x92\x88\xb9\x4b\x43\xe2\xec\x3a\xad\xe6\xb1\x4b\xac\x93\x67\xda\x87\x1d\xfa\x12\x31\x3e\x27\x0d\xe4\x2a\xda\x2e\x56\x35\x3f\x12\xff\xa3\x22\x40\x87\xd5\x7b\xc3\xaf\xf6\xa2\x7d\x39\x5b\x41\xe2\x53\x6b\x28\x2a\x01\x7a\x06\x00\x00\xff\xff\x20\x58\xb7\x12\xa0\x00\x00\x00") +var _testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x92\xbd\xaa\x1b\x41\x0c\x85\xfb\x79\x0a\x41\x9a\x04\x32\x0e\x49\x15\xb6\xcb\x0f\xa4\x5d\xec\x90\x5e\x9e\x91\x77\x65\x8f\x47\x83\xa4\x59\x62\x3f\x7d\x58\x4c\xdc\x98\x7b\xb9\xe0\x56\x3f\xdf\xe1\x70\xce\x3b\x68\xca\x0b\x17\xca\x13\x65\x18\x25\xc3\x8e\x52\x57\xf6\x0b\x8c\x52\x38\x5d\x3e\x42\x53\x5a\x58\xba\x95\x0b\x64\x3a\x70\xa5\x0c\xc7\x6e\x0e\x07\x51\x98\x12\x8d\x3f\x61\x61\x84\xf1\x86\x99\x28\xff\x26\xf3\x71\x37\xfe\x28\xdd\x9c\x74\x2b\x85\xbe\x73\xcd\x5c\xa7\xf7\x1f\xc2\x89\x6b\x1e\xe0\x71\x15\xb0\xf1\x1f\x52\x63\xa9\x03\xe8\x1e\xd3\x06\xbb\xcf\xa2\x7c\x45\x67\xa9\x9b\xd3\x57\xdb\xb0\x7c\x5a\x3e\x87\x33\x39\x66\x74\x1c\x02\x40\xc5\x33\x0d\xd0\xac\xc5\x64\x1c\x67\x31\x6f\xe8\x73\x54\x29\x14\xac\xef\x8f\x94\xdc\xd6\xbb\x08\x37\xdd\x1d\xe9\xc2\x89\xbe\xa5\x24\xbd\x7a\x00\xf8\x8f\x58\xdf\xd1\x1d\xd3\x4c\x7a\x1f\x5b\xc3\x44\xc3\xea\x19\x7b\xf1\xb7\x62\x9a\xca\xc2\xab\x8f\xa7\x49\x56\xb1\xd9\x2c\xee\x4f\x93\x94\x8c\xaf\x4f\x53\xe8\xaf\x93\x56\x2c\x71\x26\x2c\x3e\xc7\xb3\x54\x76\xd1\x98\xa4\xba\x4a\x29\x2f\x0b\xac\x81\x6c\xe9\xb0\x46\xf1\x50\x80\x7b\x8c\xf4\x85\xa2\x93\x79\x6c\xf7\x26\xc5\x66\x2d\x00\x60\xe3\x5f\x2a\xbd\xbd\xd2\x8c\xf0\x2f\x00\x00\xff\xff\x97\x97\x9b\x96\xc8\x02\x00\x00") -func testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYamlBytes() ([]byte, error) { +func testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYamlBytes() ([]byte, error) { return bindataRead( - _testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/usage/csi-storageclass.yaml", + _testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml", ) } -func testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYaml() (*asset, error) { - bytes, err := testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYamlBytes() +func 
testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml() (*asset, error) { + bytes, err := testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/hostpath/usage/csi-storageclass.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -2817,6 +2860,26 @@ func testE2eTestingManifestsStorageCsiMockCsiStorageclassYaml() (*asset, error) return a, nil } +var _testE2eTestingManifestsStorageCsiUpdateHostpathSh = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\x5b\x73\xdb\xb8\x15\x7e\xc7\xaf\x38\xa1\x34\xb1\xe5\x31\x49\xdb\xed\x43\x47\x8e\x33\xa3\xd8\xde\x56\xb3\x89\xed\x91\x9c\x6e\x33\x9b\x4d\x07\x02\x8f\x48\xac\x41\x80\x05\x40\xc9\xda\x6e\xff\x7b\xe7\x80\x17\x2b\x96\x92\xcd\x6c\xf8\x62\xe3\xf6\x9d\xdb\x87\x0f\x47\x83\x17\xe9\x42\xea\xd4\x15\x8c\x0d\xe0\xd2\x54\x1b\x2b\xf3\xc2\xc3\xd9\xc9\xd9\x29\xdc\x17\x08\x3f\xd6\x0b\xb4\x1a\x3d\x3a\x98\xd4\xbe\x30\xd6\x25\x6c\xc0\x06\xf0\x56\x0a\xd4\x0e\x33\xa8\x75\x86\x16\x7c\x81\x30\xa9\xb8\x28\xb0\x5b\x39\x86\x7f\xa2\x75\xd2\x68\x38\x4b\x4e\xe0\x90\x36\x44\xed\x52\x34\x3a\x67\x03\xd8\x98\x1a\x4a\xbe\x01\x6d\x3c\xd4\x0e\xc1\x17\xd2\xc1\x52\x2a\x04\x7c\x14\x58\x79\x90\x1a\x84\x29\x2b\x25\xb9\x16\x08\x6b\xe9\x8b\x60\xa6\x05\x49\xd8\x00\x3e\xb4\x10\x66\xe1\xb9\xd4\xc0\x41\x98\x6a\x03\x66\xb9\xbd\x0f\xb8\x0f\x0e\xd3\x57\x78\x5f\x8d\xd3\x74\xbd\x5e\x27\x3c\x38\x9b\x18\x9b\xa7\xaa\xd9\xe8\xd2\xb7\xd3\xcb\xeb\x9b\xf9\x75\x7c\x96\x9c\x84\x23\xef\xb5\x42\xe7\xc0\xe2\x7f\x6a\x69\x31\x83\xc5\x06\x78\x55\x29\x29\xf8\x42\x21\x28\xbe\x06\x63\x81\xe7\x16\x31\x03\x6f\xc8\xdf\xb5\x95\x5e\xea\xfc\x18\x9c\x59\xfa\x35\xb7\xc8\x06\x90\x49\xe7\xad\x5c\xd4\xfe\xb3\x64\x75\xde\x49\xf7\xd9\x
06\xa3\x81\x6b\x88\x26\x73\x98\xce\x23\x78\x33\x99\x4f\xe7\xc7\x6c\x00\x3f\x4d\xef\xff\x71\xfb\xfe\x1e\x7e\x9a\xcc\x66\x93\x9b\xfb\xe9\xf5\x1c\x6e\x67\x70\x79\x7b\x73\x35\xbd\x9f\xde\xde\xcc\xe1\xf6\x07\x98\xdc\x7c\x80\x1f\xa7\x37\x57\xc7\x80\xd2\x17\x68\x01\x1f\x2b\x4b\xfe\x1b\x0b\x92\xd2\x88\x19\xe5\x6c\x8e\xf8\x99\x03\x4b\xd3\x38\xe4\x2a\x14\x72\x29\x05\x28\xae\xf3\x9a\xe7\x08\xb9\x59\xa1\xd5\x52\xe7\x50\xa1\x2d\xa5\xa3\x62\x3a\xe0\x3a\x63\x03\x50\xb2\x94\x9e\xfb\x30\xb3\x13\x54\x42\x5c\xba\xa7\x72\x3a\x61\x65\xe5\x61\x2d\x95\x82\xba\xca\xb8\x47\xe0\x4a\x81\x93\x19\x0a\x6e\x61\xf6\x66\x72\x19\x4a\x1e\x60\x03\xc4\xe5\x7c\x0a\x85\x71\xbe\xe2\xbe\xa0\xe4\x61\xa5\xcc\xa6\x44\xed\xdb\x7d\xae\x16\x44\x03\xee\x69\xf7\x06\x4a\xee\x45\x01\x6b\x1a\x4b\x07\x81\x03\xdd\x69\xc8\xac\x5c\xa1\x65\x03\xb0\xa8\x90\x07\xc6\xb0\x01\xbc\x41\xaa\x4b\x07\x21\x5d\xe3\xdc\x5a\x56\x08\xa6\xf6\xc1\x3d\x65\x04\x57\x50\x9a\x8c\xf2\xd1\x04\xf9\x22\x5c\x0f\xae\x61\x81\xc0\xc1\xf3\x3c\x94\x1e\x16\x96\x6b\x51\x24\xac\x89\xf3\x22\x1a\x9e\x44\xac\xb3\xff\xef\x55\xc3\xff\x8b\x68\x78\x1a\x31\x26\x97\xf0\x02\x7e\x86\x68\xf8\x7c\x3d\x82\x5f\xce\x29\x18\xcd\x88\xa2\x82\x7b\x78\xfd\xf2\x0c\x5e\xbd\xba\xbe\xfd\x81\xbd\x77\x3c\xc7\x31\x0c\x4f\xe0\x55\x1f\x56\x6b\xbc\x31\x0d\x9a\x97\xf8\x9a\xb1\x59\xc7\xd1\x8a\x5b\x5e\xa2\x47\x4b\xf9\x08\x45\xd3\x79\xc2\x08\x8b\xd0\xf1\x51\x7a\x38\x65\x4b\xc9\x98\x43\x0f\xf1\x23\x32\x91\x41\x34\x3c\xcc\xa4\x25\x24\xa0\x08\x46\x11\x05\x3b\xc3\xd2\xac\x10\x9c\xe7\x0a\x9b\xdc\x27\xcc\x96\x10\xdb\x25\xe0\xa3\x47\xab\xb9\x8a\xb9\xf7\x74\x8b\xec\xd3\x4c\x65\xcd\x4a\x52\x54\xdb\x93\x16\x9d\xfc\x6d\x7b\xc2\x69\x5e\xb9\xc2\x78\xbf\x3d\x59\x20\x57\xbe\x88\x4b\xa3\xa5\x37\xf6\xa9\x8c\xc2\xc9\xb8\x29\x65\x4c\x73\x71\x60\x06\x55\xa3\x40\xf1\x10\x6a\x96\xa1\x0b\xb1\xf7\x75\xce\xa5\x07\xa1\x8c\xc6\x70\xe3\xdd\x38\x4d\x73\xe9\x8b\x7a\x91\x08\x53\xa6\x0f\xbd\xa2\xc5\xc2\xc9\x74\x1f\x7c\x92\x4b\xcf\x0e\x45\xb6\xd7\x36\xbc\x7c\x09\xc1\x00\xd9\x27\xf3\x7b\x4a\x3a\x62\xde\xf2\x0a\xa2\x36\x61\x
fb\x60\x22\xb8\xfe\xd7\xf4\x9e\x02\x79\x47\xda\xf5\x61\xf2\xee\x6d\x97\xe6\xf2\x21\x93\x4f\x09\x60\x81\x12\xdd\x28\x9d\x5d\x4f\xae\xde\x5d\x27\x65\xd6\x52\x84\x44\xba\xb9\x1a\x52\x37\x84\xce\xa4\x45\xe1\x8d\xdd\x00\x11\x1d\x1f\xb9\xf0\x41\x18\x1d\x29\x63\xb4\x15\xbe\xe2\x1e\x9d\x8f\x40\x6a\xf6\xe7\xf2\x94\x7a\x8b\x98\xee\x84\x9f\x36\xb7\x36\x65\xec\xca\x04\x79\xc7\x4c\x7a\x28\xb9\xae\xb9\x52\x9b\x04\x66\xb5\x86\x61\x2b\x0d\xde\x80\xc5\xa5\x45\xd7\x88\xbb\x30\xda\xa3\xf6\x0d\x63\x45\x05\xb1\xdd\x9b\xbc\xce\xc2\x4e\x30\x69\x4f\x9b\x3e\x61\xcf\xf2\xd7\xff\x83\x67\x18\xd3\x91\xd8\x2e\xb8\x48\x36\xbc\x54\x6d\x46\x07\x50\x59\xb9\x92\x0a\xb3\x1c\x33\xb8\x33\x19\xcc\x51\xd4\x56\xfa\x0d\xdc\x19\x25\xc5\xe6\x18\x2a\x8b\x2b\x69\x6a\xa7\x36\x90\xe1\x52\x6a\xcc\xe0\xd7\xda\xf9\x20\xa6\xb9\xc0\xbb\x2b\x58\x49\x0e\x77\x0d\x4c\x8e\xd9\x3d\x3a\x7f\x37\xbf\xbb\x54\xb5\xf3\x68\x67\x46\xe1\x1b\xa9\x33\xa9\xf3\xc3\x11\x7b\x90\x3a\x1b\xc3\xee\x12\xe3\x95\x6c\x9f\xd0\x31\x04\x1f\x79\x78\x80\xe5\x6f\x41\x92\x92\x87\xbf\xb9\x44\x9a\x74\x75\xca\x4a\xf4\x3c\xe3\x9e\x8f\x19\x04\x3d\x18\x43\xe5\x2a\x2a\x5a\xdc\x05\x1b\x5b\xa3\x90\xb9\x7a\xf1\x2b\x0a\xef\x68\x5f\x0c\x8d\xdd\x39\xda\x95\x14\x38\x11\xc2\xd4\xda\x07\x91\x68\x20\xe8\x78\x77\xbf\xfb\x69\x57\x71\x81\x63\x8a\x99\xd7\xca\x7f\x2b\xcc\x96\x28\x7c\x27\xd2\x96\x70\x7c\x27\x52\xab\x49\xdf\x89\xf2\x05\xf1\x8a\x89\xc6\xd6\x28\xf5\x65\x03\x54\x90\x19\x2e\xa9\x14\x3b\x04\xe8\xcb\xd8\x53\xb4\xea\x99\x14\x57\xae\x62\x00\xbc\x92\x7f\xb7\xa6\xae\xbe\xc2\x8c\x70\x85\x58\x66\xd6\x5a\x19\x9e\xc1\xe1\x08\xfe\x1b\x9c\xa9\xac\x21\x16\x34\xcf\x52\x98\xe0\xbe\xb8\x88\x86\x67\xcd\xc8\xf3\xfc\x22\x1a\xfe\xa5\x19\x10\xf8\x45\x34\xfc\x6b\xc4\xc2\xb0\x11\xa6\xb8\x82\x68\xd8\xa2\xa4\xc3\xa0\x65\x61\xb5\xb6\xea\x22\xfa\x63\x21\xe9\x8f\x5a\xbe\x4e\x87\x9e\xe7\xbb\x97\xb9\x41\x4d\x87\x64\x3e\x7a\x7a\x16\x9f\x59\x6d\xd7\xfb\x5b\xbb\x25\x36\xc7\xd0\xc5\x8d\x19\x2c\xad\x29\x61\x58\x5b\xc5\x06\xe1\x82\xee\x15\xf5\x1d\x11\x63\x03\xea\xf7\x5a\x99\x62\x83\xfe\x09\x15\xb5\x
55\x10\xc7\x4b\x2e\xe9\x0f\xb5\x0a\x94\x73\x88\xc8\x40\x04\xaf\xbf\xe0\x24\xfb\x5f\x78\x53\x9f\xfa\x1d\x72\x04\xb9\x28\xba\x5e\x28\xe9\x5a\x26\x8b\x4a\xa2\xa3\x46\xb0\xd5\xc3\x15\xea\x60\x21\x74\x2c\xd1\xd3\x1b\x6a\x4a\xf4\x85\xd4\x79\x04\x05\x77\x10\x85\xeb\xf1\x34\xc7\x1d\xc8\x92\xba\x38\x62\x12\x81\x7f\xe9\x9d\xa5\x2e\x8d\x5a\x3f\xae\x8e\x41\xfa\x80\xe5\xd7\xa6\x39\xec\x1a\xaf\x10\x2c\x56\xc6\xc9\xf0\xa6\xf4\x8e\x37\xf0\xd4\x75\xe5\xda\x58\x6a\x2e\x9b\x33\x17\xc3\xc3\xdc\x62\x90\xee\x83\x4f\x70\x14\x26\xc7\xc9\x91\x70\xf2\x00\x76\x54\x18\x7e\x07\xfa\x11\x11\x23\x1c\xb8\xf3\xa4\xdf\x9c\x9e\x9f\x1f\xc0\xef\xd0\xe0\xac\xe0\x40\x9b\x0c\xbb\x9a\x59\xcc\xa9\x61\xe6\x96\x76\x38\x63\x3d\xc4\xf5\x88\x2d\x43\x97\x1b\x3c\xd2\x30\x6c\x3c\x39\x87\xcc\xf4\x9c\x1e\x1e\xa2\x28\x0c\x44\xcd\x5a\xb4\x6d\x38\x4d\x8e\xc6\x69\x7a\x30\x7a\xba\x0d\x5b\xe4\xef\x5f\x87\x8e\x88\x0e\x5b\x7c\x7a\x37\xa1\xfd\x3e\x13\x83\xa3\x51\x3f\x4f\xdf\xe0\xab\x52\xc1\x73\xd4\x7e\xbc\x3a\x49\xe8\x57\xc7\xf6\xb1\xee\x9e\x7e\xcd\x71\x02\xfe\x78\x98\x1c\x7d\x1c\xc5\x3f\x7f\x1a\xff\x72\x44\xa9\xfb\x78\xda\x85\xd2\x23\x51\x48\x7f\x0c\x13\x10\x3e\x8e\xf6\x63\x9c\x9f\xf7\xc3\x67\xe1\x7d\x8b\x9f\x84\x99\x1e\xb4\x83\x4f\xa4\x02\x5d\x3a\x9e\xdb\x69\xf2\xdb\x62\x6e\x67\xb8\xfb\xf6\x35\x91\xa3\x9d\x5d\x4d\xde\x27\xda\x84\x5f\x42\x2d\xc1\x03\x78\x92\x24\x7b\x77\x37\x2a\xf8\xec\x91\x89\xf6\x6e\x7d\x22\x46\xfc\x6c\xff\x16\x51\x9e\x7f\x5b\x09\x0c\x61\x38\x2e\xf6\x25\xb8\x9f\xef\x85\xbb\x17\x94\x88\xfe\x0d\x9d\x63\x44\xb2\x49\x7f\x1a\x6d\xc9\x8c\x46\xf6\xff\x00\x00\x00\xff\xff\x48\x15\xaf\x74\xc9\x0f\x00\x00") + +func testE2eTestingManifestsStorageCsiUpdateHostpathShBytes() ([]byte, error) { + return bindataRead( + _testE2eTestingManifestsStorageCsiUpdateHostpathSh, + "test/e2e/testing-manifests/storage-csi/update-hostpath.sh", + ) +} + +func testE2eTestingManifestsStorageCsiUpdateHostpathSh() (*asset, error) { + bytes, err := testE2eTestingManifestsStorageCsiUpdateHostpathShBytes() + if err != nil { + return nil, err + 
} + + info := bindataFileInfo{name: "test/e2e/testing-manifests/storage-csi/update-hostpath.sh", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _testE2e_nodeTestingManifestsSriovdpCmYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x90\xc1\x4b\xfb\x30\x14\xc7\xef\xfd\x2b\x1e\x39\x6f\x3f\xd2\xfd\x36\x19\xb9\x7a\x55\x0f\x1e\xbc\x8c\x31\xd2\xe4\x75\x3c\xd7\xa4\x21\xc9\xc2\x44\xf7\xbf\x4b\xda\xaa\x8c\x3a\x9c\x88\x60\x0f\x25\x4d\x3e\xdf\xef\x6b\x3e\xd2\xd1\x03\xfa\x40\xad\x15\x90\xca\x62\x47\x56\x0b\xb8\x6e\x6d\x4d\xdb\x5b\xe9\x0a\x83\x51\x6a\x19\xa5\x28\x00\xac\x34\x28\x20\x78\x6a\x93\x76\x53\xd5\x31\xc3\x76\x70\x52\xa1\x80\xdd\xbe\xc2\x69\x78\x0a\x11\x4d\xf1\x96\xea\xb9\x7f\x8f\x21\x4f\x78\x29\x00\x00\x9e\xbb\x77\x7e\x98\xc7\xd0\xee\xbd\xc2\x1b\x0a\x91\x09\x58\x7d\x1c\x8d\x90\x3b\x69\x90\x09\x60\x64\x23\x36\x9b\xee\x37\x36\x16\xa3\xc6\x44\x0a\xd9\x64\x1c\x0c\xd8\xa0\x8a\xad\x0f\x4c\xc0\xb8\xb7\x43\x12\x5a\xdd\x03\x2b\xb6\xe4\xcb\x2b\xb6\x1e\xf7\x74\x60\x3f\xa5\x07\xcb\xc5\x5c\xb1\x09\xb0\x92\xa3\x3e\x1f\xf0\x94\x70\x68\xa6\x39\xc7\x54\xe7\x08\x1d\xb6\x55\x5e\xae\x47\xa1\xe3\xc9\xce\xf1\xb4\xf5\x9b\x56\xb4\xd3\xbb\x3f\x2d\x24\xd5\xd4\x4e\x9d\xa2\xb3\xb0\xab\xf3\xbd\x7a\x18\xad\xe3\x81\xd7\x9c\x4d\xf2\x72\x16\x66\x75\xf9\x0b\xfe\x4c\x63\x0f\x83\x3e\xaf\x8d\xfc\x4c\x1f\x85\xfb\x7c\x22\x20\xfa\x3d\xfe\x5c\x6f\xb9\xa8\xfe\x5f\xa6\x97\x97\xcb\x8b\xb4\x9a\xe6\xb0\xd8\x50\xf5\xb5\x9d\xf7\xaf\x9e\x3c\x16\xaf\x01\x00\x00\xff\xff\x2a\x5e\x3a\xd9\x06\x04\x00\x00") func testE2e_nodeTestingManifestsSriovdpCmYamlBytes() ([]byte, error) { @@ -2877,7 +2940,7 @@ func testE2e_nodeTestingManifestsSriovdpSaYaml() (*asset, error) { return a, nil } -var _testImagesMakefile = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x53\x6f\x6f\xa3\x36\x1c\x7e\x7d\xfe\x14\x8f\xc2\xbd\x68\xa5\x40\xda\x68\xdb\x49\x99\xaa\x13\x4d\x59\x8b\xda\x83\x1b\xd0\xab\xfa\x2a\x72\xe0\x17\xb0\x4a\x6c\x66\x9b\x26\xd1\xb4\xef\x3e\x19\x9a\x6d\x59\x37\x1d\xef\x6c\x3f\xff\xfc\xf8\x87\x87\xa5\xea\x0e\x5a\xd4\x8d\xc5\xfc\xe2\xf2\x13\x8a\x86\x70\xdf\xaf\x49\x4b\xb2\x64\x10\xf6\xb6\x51\xda\x04\xcc\x63\x1e\x1e\x44\x49\xd2\x50\x85\x5e\x56\xa4\x61\x1b\x42\xd8\xf1\xb2\xa1\xe3\xc9\x14\xdf\x48\x1b\xa1\x24\xe6\xc1\x05\xce\x1c\x60\xf2\x76\x34\x39\xff\x99\x79\x38\xa8\x1e\x5b\x7e\x80\x54\x16\xbd\x21\xd8\x46\x18\x6c\x44\x4b\xa0\x7d\x49\x9d\x85\x90\x28\xd5\xb6\x6b\x05\x97\x25\x61\x27\x6c\x33\xd8\xbc\x89\x04\xcc\xc3\xf3\x9b\x84\x5a\x5b\x2e\x24\x38\x4a\xd5\x1d\xa0\x36\xff\xc4\x81\xdb\x21\xb0\xfb\x1a\x6b\xbb\xc5\x6c\xb6\xdb\xed\x02\x3e\x84\x0d\x94\xae\x67\xed\x08\x34\xb3\x87\x78\x19\x25\x79\xe4\xcf\x83\x8b\x81\xf2\x28\x5b\x32\x06\x9a\x7e\xeb\x85\xa6\x0a\xeb\x03\x78\xd7\xb5\xa2\xe4\xeb\x96\xd0\xf2\x1d\x94\x06\xaf\x35\x51\x05\xab\x5c\xde\x9d\x16\x56\xc8\x7a\x0a\xa3\x36\x76\xc7\x35\x31\x0f\x95\x30\x56\x8b\x75\x6f\x4f\xca\x3a\xa6\x13\xe6\x04\xa0\x24\xb8\xc4\x24\xcc\x11\xe7\x13\x5c\x87\x79\x9c\x4f\x99\x87\xa7\xb8\xb8\x4b\x1f\x0b\x3c\x85\x59\x16\x26\x45\x1c\xe5\x48\x33\x2c\xd3\xe4\x26\x2e\xe2\x34\xc9\x91\xfe\x82\x30\x79\xc6\x7d\x9c\xdc\x4c\x41\xc2\x36\xa4\x41\xfb\x4e\xbb\xfc\x4a\x43\xb8\x1a\xa9\x72\x9d\xe5\x44\x27\x01\x36\x6a\x0c\x64\x3a\x2a\xc5\x46\x94\x68\xb9\xac\x7b\x5e\x13\x6a\xf5\x4a\x5a\x0a\x59\xa3\x23\xbd\x15\xc6\x3d\xa6\x01\x97\x15\xf3\xd0\x8a\xad\xb0\xdc\x0e\x3b\xef\x2e\x15\x30\x96\x45\xb7\x71\x5e\x64\xcf\xf8\x7c\x85\xba\xd4\x81\x50\xb3\x97\xbf\x26\xc9\xa7\x39\xf9\x96\x8c\xf5\xc5\x96\xd7\x64\xd8\x6d\x1a\x66\x5f\x1c\xf4\x13\xbb\x49\x97\xf7\x51\xb6\x5a\x46\x59\xb1\xba\x0e\xf3\x68\xf5\x35\x2c\xee\xf0\xf9\x8a\xfd\x1a\x7d\x79\xfc\x16\x65\x79\x9c\x26\x57\xaf\x3f\x06\x97\xc1\x85\x3f\x67\xb7\xe9\x43\x98\xdc\xae\x8e\xfb\x97\xc1\xe5\x4f\xc1\x0f\x8c\xf6\x9d\xd2\x96\x31\xb1\x91\x15\x6d\xf0
\x74\x17\x16\xec\xe3\x19\x69\xad\xf4\xb0\x70\xa5\xf3\xbf\x9f\xf5\x95\x6b\xe1\x5e\x74\x0a\xda\x2f\xb0\xe5\x2f\x04\xde\xb6\x03\xf2\x4a\x92\x3d\x67\x24\x2b\xb1\x61\xcc\xc3\x75\x2f\xda\x0a\xa5\xaa\x68\xfc\x09\x42\x5d\x9b\xc5\x30\x5b\x0e\xbc\xc0\x8d\xd0\x54\x5a\xa5\x0f\x90\x7c\x4b\xc6\x0d\xc5\xda\x51\x46\x74\xb4\xe7\xdb\xae\xa5\x91\x70\x6a\x53\xb6\xbd\xb1\xa4\x79\x27\x86\x5e\x48\x33\xde\xb6\x0b\x77\xee\x97\x4a\xba\xe1\x26\xcd\xd8\xc9\x72\xc1\x3e\x04\xb3\xa1\x3f\xbf\xb7\xa2\x0d\x4c\x33\x7a\xe1\xe3\x99\x93\x3c\xc7\xa4\x52\xe5\x0b\xe9\xc9\xc8\xeb\x7a\xd3\x2c\xd8\x87\x35\x37\x0d\xfc\x3d\xbe\x43\xd5\x54\xbb\xa1\x3c\x4c\xfe\x9f\xe1\x04\x8f\x84\xd1\x62\xd0\xf0\xb9\xac\x8e\x66\xff\x69\xb2\xe2\xb2\x5a\x8d\xe4\xdf\x1d\xf9\x0f\xc6\x82\xaf\x77\x69\xf2\x3c\x5c\x17\xef\x85\x70\x8c\xff\xaf\x3a\xfe\x0c\x00\x00\xff\xff\x35\xea\xd1\x8c\xaf\x04\x00\x00") +var _testImagesMakefile = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x53\x6f\x6f\xa3\x36\x1c\x7e\x7d\xfe\x14\x8f\xc2\xbd\x68\xa5\x40\xda\x48\xdb\x69\x99\xaa\x13\x4d\x59\x8b\xda\x83\x1b\xd0\xab\xfa\x2a\x72\xe0\x17\xb0\x4a\x6c\x66\x9b\x26\xd1\xb4\xef\x3e\x19\x9a\x6d\x59\x37\x1d\xef\x6c\x3f\xff\xfc\xf8\x87\x87\xa5\xea\x0e\x5a\xd4\x8d\xc5\xfc\xe2\xf2\x13\x8a\x86\x70\xdf\xaf\x49\x4b\xb2\x64\x10\xf6\xb6\x51\xda\x04\xcc\x63\x1e\x1e\x44\x49\xd2\x50\x85\x5e\x56\xa4\x61\x1b\x42\xd8\xf1\xb2\xa1\xe3\xc9\x14\xdf\x48\x1b\xa1\x24\xe6\xc1\x05\xce\x1c\x60\xf2\x76\x34\x39\xff\x99\x79\x38\xa8\x1e\x5b\x7e\x80\x54\x16\xbd\x21\xd8\x46\x18\x6c\x44\x4b\xa0\x7d\x49\x9d\x85\x90\x28\xd5\xb6\x6b\x05\x97\x25\x61\x27\x6c\x33\xd8\xbc\x89\x04\xcc\xc3\xf3\x9b\x84\x5a\x5b\x2e\x24\x38\x4a\xd5\x1d\xa0\x36\xff\xc4\x81\xdb\x21\xb0\xfb\x1a\x6b\xbb\xc5\x6c\xb6\xdb\xed\x02\x3e\x84\x0d\x94\xae\x67\xed\x08\x34\xb3\x87\x78\x19\x25\x79\xe4\xcf\x83\x8b\x81\xf2\x28\x5b\x32\x06\x9a\x7e\xeb\x85\xa6\x0a\xeb\x03\x78\xd7\xb5\xa2\xe4\xeb\x96\xd0\xf2\x1d\x94\x06\xaf\x35\x51\x05\xab\x5c\xde\x9d\x16\x56\xc8\x7a\x0a\xa3\x36\x76\xc7\x35\x31\x0f\x95\x30\x56\x8b\x75\x6f\x4f\xca\x3a\xa6\x
13\xe6\x04\xa0\x24\xb8\xc4\x24\xcc\x11\xe7\x13\x5c\x87\x79\x9c\x4f\x99\x87\xa7\xb8\xb8\x4b\x1f\x0b\x3c\x85\x59\x16\x26\x45\x1c\xe5\x48\x33\x2c\xd3\xe4\x26\x2e\xe2\x34\xc9\x91\xfe\x82\x30\x79\xc6\x7d\x9c\xdc\x4c\x41\xc2\x36\xa4\x41\xfb\x4e\xbb\xfc\x4a\x43\xb8\x1a\xa9\x72\x9d\xe5\x44\x27\x01\x36\x6a\x0c\x64\x3a\x2a\xc5\x46\x94\x68\xb9\xac\x7b\x5e\x13\x6a\xf5\x4a\x5a\x0a\x59\xa3\x23\xbd\x15\xc6\x3d\xa6\x01\x97\x15\xf3\xd0\x8a\xad\xb0\xdc\x0e\x3b\xef\x2e\x15\x30\x96\x45\xb7\x71\x5e\x64\xcf\xf8\x7c\x85\xba\xd4\x81\x50\xb3\x97\xbf\x26\xc9\xa7\x39\xf9\x96\x8c\xf5\xc5\x96\xd7\x64\xd8\x6d\x1a\x66\x5f\x1c\xf4\x13\xbb\x49\x97\xf7\x51\xb6\x5a\x46\x59\xb1\xba\x0e\xf3\x68\xf5\x35\x2c\xee\xf0\xf9\x8a\xfd\x1a\x7d\x79\xfc\x16\x65\x79\x9c\x26\x57\xaf\x3f\x04\x97\xc1\x85\x3f\x67\xb7\xe9\x43\x98\xdc\xae\x8e\xfb\x97\xc1\xe5\x8f\xc1\x4f\x8c\xf6\x9d\xd2\x96\x31\xb1\x91\x15\x6d\xf0\x74\x17\x16\xec\xe3\x19\x69\xad\xf4\xb0\x70\xa5\xf3\xbf\x9f\xf5\x95\x6b\xe1\x5e\x74\x0a\xda\x2f\xb0\xe5\x2f\x04\xde\xb6\x03\xf2\x4a\x92\x3d\x67\x24\x2b\xb1\x61\xcc\xc3\x75\x2f\xda\x0a\xa5\xaa\x68\xfc\x09\x42\x5d\x9b\xc5\x30\x5b\x0e\xbc\xc0\x8d\xd0\x54\x5a\xa5\x0f\x90\x7c\x4b\xc6\x0d\xc5\xda\x51\x46\x74\xb4\xe7\xdb\xae\xa5\x91\x70\x6a\x53\xb6\xbd\xb1\xa4\x79\x27\x86\x5e\x48\x33\xde\xb6\x0b\x77\xee\x97\x4a\xba\xe1\x26\xcd\xd8\xc9\x72\xc1\x3e\x04\xb3\xa1\x3f\xbf\xb7\xa2\x0d\x4c\x33\x7a\xe1\xe3\x99\x93\x3c\xc7\xa4\x52\xe5\x0b\xe9\xc9\xc8\xeb\x7a\xd3\x2c\xd8\x87\x35\x37\x0d\xfc\x3d\xbe\x43\xd5\x54\xbb\xa1\x3c\x4c\xfe\x9f\xe1\x04\x8f\x84\xd1\x62\xd0\xf0\xb9\xac\x8e\x66\xff\x69\xb2\xe2\xb2\x5a\x8d\xe4\xdf\x1d\xf9\x0f\xc6\x82\xaf\x77\x69\xf2\x3c\x5c\x17\xef\x85\x70\x8c\xff\xaf\x3a\xfe\x0c\x00\x00\xff\xff\x1c\xe6\x7a\x6b\xaf\x04\x00\x00") func testImagesMakefileBytes() ([]byte, error) { return bindataRead( @@ -7177,7 +7240,7 @@ func testImagesSampleApiserverDockerfile_windows() (*asset, error) { return a, nil } -var _testImagesSampleApiserverMakefile = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x61\x6f\xdb\x36\x10\xfd\x1c\xfe\x8a\x07\x2b\xe8\x9a\xc1\x92\xec\xa0\x68\x87\x14\xc6\xa6\xda\x9e\x63\x34\xb5\x06\xcb\x69\xd1\xa1\x40\x40\x49\x67\x89\x8b\x44\x6a\x24\x65\xd9\x08\xf2\xdf\x07\xca\x4e\xba\x22\x18\x30\x7f\x91\x25\xde\xbd\x7b\x7c\xf7\xee\x3c\x4c\x55\x73\xd0\xa2\x28\x2d\x2e\x47\x97\x23\x6c\x4a\xc2\xc7\x36\x25\x2d\xc9\x92\x41\xd4\xda\x52\x69\x13\x30\x8f\x79\xb8\x11\x19\x49\x43\x39\x5a\x99\x93\x86\x2d\x09\x51\xc3\xb3\x92\x9e\x4e\x86\xf8\x4c\xda\x08\x25\x71\x19\x8c\xf0\xda\x05\x0c\x4e\x47\x83\x8b\xf7\xcc\xc3\x41\xb5\xa8\xf9\x01\x52\x59\xb4\x86\x60\x4b\x61\xb0\x15\x15\x81\xf6\x19\x35\x16\x42\x22\x53\x75\x53\x09\x2e\x33\x42\x27\x6c\xd9\x97\x39\x81\x04\xcc\xc3\xd7\x13\x84\x4a\x2d\x17\x12\x1c\x99\x6a\x0e\x50\xdb\x7f\xc7\x81\xdb\x9e\xb0\xfb\x95\xd6\x36\x57\x61\xd8\x75\x5d\xc0\x7b\xb2\x81\xd2\x45\x58\x1d\x03\x4d\x78\xb3\x9c\xce\x57\xc9\xdc\xbf\x0c\x46\x7d\xca\xad\xac\xc8\x18\x68\xfa\xbb\x15\x9a\x72\xa4\x07\xf0\xa6\xa9\x44\xc6\xd3\x8a\x50\xf1\x0e\x4a\x83\x17\x9a\x28\x87\x55\x8e\x6f\xa7\x85\x15\xb2\x18\xc2\xa8\xad\xed\xb8\x26\xe6\x21\x17\xc6\x6a\x91\xb6\xf6\x07\xb1\x9e\xd8\x09\xf3\x43\x80\x92\xe0\x12\x83\x28\xc1\x32\x19\xe0\x43\x94\x2c\x93\x21\xf3\xf0\x65\xb9\xb9\x8e\x6f\x37\xf8\x12\xad\xd7\xd1\x6a\xb3\x9c\x27\x88\xd7\x98\xc6\xab\xd9\x72\xb3\x8c\x57\x09\xe2\xdf\x11\xad\xbe\xe2\xe3\x72\x35\x1b\x82\x84\x2d\x49\x83\xf6\x8d\x76\xfc\x95\x86\x70\x32\x52\xee\x34\x4b\x88\x7e\x20\xb0\x55\x47\x42\xa6\xa1\x4c\x6c\x45\x86\x8a\xcb\xa2\xe5\x05\xa1\x50\x3b\xd2\x52\xc8\x02\x0d\xe9\x5a\x18\xd7\x4c\x03\x2e\x73\xe6\xa1\x12\xb5\xb0\xdc\xf6\x5f\x5e\x5c\x2a\x60\x2c\x4e\xf0\xeb\x04\x95\x90\xed\x9e\x45\xeb\xe9\xb5\x7b\xe3\x75\xfe\xf6\x0d\xdb\x44\xeb\xc5\x7c\xe3\xde\xcf\x5f\x4f\x6f\xd7\xb3\xe5\xfa\x82\x2d\xe2\x68\xfd\x09\x13\xbc\x63\x8b\xf8\x26\x5a\x2d\xee\x3e\xcf\xd7\xc9\x32\x5e\xf5\x18\xdc\x92\xb1\x2c\x59\x4f\xef\x66\xcb\x35\x5c\x9a\x54\x36\x17\x1a\xe7\xaf\x4d\x49\x55\x85\xa6\xcb\x2f\x2e\x18\xed\x1b\xa5\x2d\x63\x1e\x3e\xb4\xa2\xca\xb1
\x1b\x07\xe3\x77\xc1\xc8\x35\x86\xa4\x69\xf5\xf1\xd6\x59\xab\x35\x49\x0b\x4d\x15\x71\x43\x30\x6d\xe3\xd2\x0c\x38\x1a\x2d\x94\xc6\xee\x64\xda\x93\x89\x0c\xaf\x9b\x8a\xc0\x1b\x61\x48\xef\x48\x33\x0f\x0b\xb2\xbd\x19\x55\x6b\x91\xba\x52\x4e\x21\xab\xd0\xa8\xa6\x75\x64\x51\xab\xbc\xad\x08\x99\x33\x18\xf3\xdc\x14\xc9\x21\x8a\x53\x16\xe2\x24\x74\x82\xf8\xcf\x7a\x93\xdc\xb9\xf4\x1e\x8a\xa5\x42\x5e\xb1\xb3\x5c\x65\xf7\xa4\xa1\x5b\x09\xdf\xd7\x35\x7c\x01\x7f\x87\xc1\xf9\xc3\x51\xbc\xc7\xab\xef\xff\xfe\x1c\xe0\xfe\x17\x13\x14\x99\x0e\x84\x0a\x7b\x10\x5f\xd4\xbc\xa0\xf0\xbe\x4d\xc9\xcf\xb4\x32\xe6\xca\x69\xf1\x36\x78\xe3\x8f\xf1\x8d\x9d\x9d\x85\xa9\x90\x61\xca\x4d\x09\x3f\xc3\xc0\x7d\x39\xab\xef\x9d\xa0\x7e\x83\xb0\x50\xa1\xd1\x59\xff\x4c\x85\xc4\xab\x57\x7d\xca\xd9\x22\x1e\x8f\xc7\x9f\xe2\xd9\xed\xcd\x7c\xa2\x24\x0a\xd5\x5f\xc8\xcf\xfb\xe2\x42\x85\x47\x9d\xfc\x67\x9d\x7e\xdb\x8d\x8e\xf2\xff\x07\xc0\x74\x11\xdf\xcd\x57\xd1\x87\x9b\xf9\x6c\x32\x42\xdf\xff\xc9\xf9\x43\xff\x7c\xc4\x22\x8e\x93\xc9\xf9\x43\x9c\x3c\xf6\x27\xd3\xeb\xc9\xf9\x83\x7b\x3c\x3e\xd5\xfd\xbf\x45\xb7\x42\xe6\xcf\x57\xf1\x25\xaf\x9f\x1a\xfa\x3d\xe7\x67\xf8\xb4\xa7\x0c\x59\x83\x87\x47\x3c\xeb\xfa\x02\x1b\xdf\xde\x0f\x1c\xa4\xd7\x0f\x0c\xd7\xf5\xd0\x91\x11\xd2\x58\x5e\x55\x6e\x7f\x99\xa7\x42\x61\xef\xfa\xbb\x3e\xc4\x28\xfc\xd5\x1a\x8b\x9e\x88\xf3\x53\xbf\xdf\xb8\xcc\x8f\x9b\x4a\x58\xd7\x79\x5b\xd2\x11\x59\x2b\x65\x5d\x4a\xe7\xcc\x23\x9f\x43\x9c\xd1\xb6\x5a\xd5\xc7\x0d\x69\x4b\xad\x3a\xf0\x8e\x1f\x90\x29\xe9\xf6\x1e\xb9\xf9\x76\x03\xdb\x07\x71\x18\xcb\x65\xce\x75\x8e\x4a\x65\xfd\x88\x06\x47\xf8\x2f\x42\xe6\xaa\x33\x70\xf7\x6d\xad\xdb\x61\x06\x25\xdf\x11\x02\xda\xbb\xa5\x6b\x49\x3a\xeb\x0f\xd1\x95\x22\x2b\xdd\x66\xea\xca\x03\x3a\xfa\x49\x13\x0c\x71\x9d\x95\xce\xe9\x2f\x04\x64\x2c\xf8\xe3\x3a\x5e\x7d\xbd\x42\x2a\x24\xfb\x27\x00\x00\xff\xff\xc5\xbd\xc1\x5c\x45\x06\x00\x00") +var _testImagesSampleApiserverMakefile = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x61\x6f\xdb\x36\x10\xfd\x1c\xfe\x8a\x07\x2b\xe8\x9a\xc1\x92\xec\x60\x68\xb7\x14\xc6\xa6\xda\x9e\x63\x34\xb5\x06\xcb\x69\xd1\xa1\x40\x40\x49\x67\x89\x8b\x44\x6a\x24\x65\xd9\x08\xf2\xdf\x07\xca\x4e\xba\x22\x18\x30\x7f\x91\x25\xde\xbd\x7b\xf7\xf8\xee\x3c\x4c\x55\x73\xd0\xa2\x28\x2d\x2e\x47\x97\x23\x6c\x4a\xc2\x87\x36\x25\x2d\xc9\x92\x41\xd4\xda\x52\x69\x13\x30\x8f\x79\xb8\x11\x19\x49\x43\x39\x5a\x99\x93\x86\x2d\x09\x51\xc3\xb3\x92\x9e\x4e\x86\xf8\x44\xda\x08\x25\x71\x19\x8c\xf0\xda\x05\x0c\x4e\x47\x83\x8b\x77\xcc\xc3\x41\xb5\xa8\xf9\x01\x52\x59\xb4\x86\x60\x4b\x61\xb0\x15\x15\x81\xf6\x19\x35\x16\x42\x22\x53\x75\x53\x09\x2e\x33\x42\x27\x6c\xd9\x97\x39\x81\x04\xcc\xc3\x97\x13\x84\x4a\x2d\x17\x12\x1c\x99\x6a\x0e\x50\xdb\x7f\xc7\x81\xdb\x9e\xb0\xfb\x95\xd6\x36\x57\x61\xd8\x75\x5d\xc0\x7b\xb2\x81\xd2\x45\x58\x1d\x03\x4d\x78\xb3\x9c\xce\x57\xc9\xdc\xbf\x0c\x46\x7d\xca\xad\xac\xc8\x18\x68\xfa\xbb\x15\x9a\x72\xa4\x07\xf0\xa6\xa9\x44\xc6\xd3\x8a\x50\xf1\x0e\x4a\x83\x17\x9a\x28\x87\x55\x8e\x6f\xa7\x85\x15\xb2\x18\xc2\xa8\xad\xed\xb8\x26\xe6\x21\x17\xc6\x6a\x91\xb6\xf6\x3b\xb1\x9e\xd8\x09\xf3\x5d\x80\x92\xe0\x12\x83\x28\xc1\x32\x19\xe0\x7d\x94\x2c\x93\x21\xf3\xf0\x79\xb9\xb9\x8e\x6f\x37\xf8\x1c\xad\xd7\xd1\x6a\xb3\x9c\x27\x88\xd7\x98\xc6\xab\xd9\x72\xb3\x8c\x57\x09\xe2\xdf\x11\xad\xbe\xe0\xc3\x72\x35\x1b\x82\x84\x2d\x49\x83\xf6\x8d\x76\xfc\x95\x86\x70\x32\x52\xee\x34\x4b\x88\xbe\x23\xb0\x55\x47\x42\xa6\xa1\x4c\x6c\x45\x86\x8a\xcb\xa2\xe5\x05\xa1\x50\x3b\xd2\x52\xc8\x02\x0d\xe9\x5a\x18\x77\x99\x06\x5c\xe6\xcc\x43\x25\x6a\x61\xb9\xed\xbf\xbc\x68\x2a\x60\x2c\x4e\xf0\xeb\x04\x95\x90\xed\x9e\x45\xeb\xe9\xb5\x7b\xe3\x75\xfe\xe6\x27\xb6\x89\xd6\x8b\xf9\xc6\xbd\x9f\xbf\x9e\xde\xae\x67\xcb\xf5\x05\x5b\xc4\xd1\xfa\x23\x26\x78\xcb\x16\xf1\x4d\xb4\x5a\xdc\x7d\x9a\xaf\x93\x65\xbc\xea\x31\xb8\x25\x63\x59\xb2\x9e\xde\xcd\x96\x6b\xb8\x34\xa9\x6c\x2e\x34\xce\x5f\x9b\x92\xaa\x0a\x4d\x97\x5f\x5c\x30\xda\x37\x4a\x5b\xc6\x3c\xbc\x6f\x45\x95\x63
\x37\x0e\xc6\x6f\x83\x91\xbb\x18\x92\xa6\xd5\xc7\xae\xb3\x56\x6b\x92\x16\x9a\x2a\xe2\x86\x60\xda\xc6\xa5\x19\x70\x34\x5a\x28\x8d\xdd\xc9\xb4\x27\x13\x19\x5e\x37\x15\x81\x37\xc2\x90\xde\x91\x66\x1e\x16\x64\x7b\x33\xaa\xd6\x22\x75\xa5\x9c\x42\x56\xa1\x51\x4d\xeb\xc8\xa2\x56\x79\x5b\x11\x32\x67\x30\xe6\xb9\x29\x92\x43\x14\xa7\x2c\xc4\x49\xe8\x04\xf1\x9f\xf5\x26\xb9\x73\xe9\x3d\x14\x4b\x85\xbc\x62\x67\xb9\xca\xee\x49\x43\xb7\x12\xbe\xaf\x6b\xf8\x02\xfe\x0e\x83\xf3\x87\xa3\x78\x8f\x57\xdf\xfe\xfd\x39\xc0\xfd\xcf\x26\x28\x32\x1d\x08\x15\xf6\x20\xbe\xa8\x79\x41\xe1\x7d\x9b\x92\x9f\x69\x65\xcc\xd5\x6e\x1c\x5c\x8e\x83\x91\x5f\xa8\x71\x30\x7e\x13\xfc\xe2\xa7\xad\xb1\xa4\x83\x11\xbe\xb2\xb3\xb3\x30\x15\x32\x4c\xb9\x29\xe1\x67\x18\xb8\x2f\x67\xf5\xbd\x13\xd8\x6f\x10\x16\x2a\x34\x3a\xeb\x9f\xa9\x90\x78\xf5\xaa\x4f\x39\x5b\xc4\xe3\xf1\xf8\x63\x3c\xbb\xbd\x99\x4f\x94\x44\xa1\xfa\x06\xfd\xbc\x27\x23\x54\x78\xd4\xcd\x7f\xd6\xed\xb7\xdd\xe8\x78\x1d\xff\x01\x30\x5d\xc4\x77\xf3\x55\xf4\xfe\x66\x3e\x9b\x8c\xd0\xfb\x61\x72\xfe\xd0\x3f\x1f\xb1\x88\xe3\x64\x72\xfe\x10\x27\x8f\xfd\xc9\xf4\x7a\x72\xfe\xe0\x1e\x8f\x4f\x75\xff\x6f\xd1\xad\x90\xf9\x73\x2b\xbe\xe4\xf5\xd3\x05\x7f\xcb\xf9\x11\x3e\xed\x29\x43\xd6\xe0\xe1\x11\xcf\x3a\xbf\xc0\xc6\xd7\x77\x03\x07\xe9\xf5\x03\xc4\x75\x3d\x74\x64\x84\x34\x96\x57\x95\xdb\x67\xe6\xa9\x50\xd8\x4f\xc1\x5d\x1f\x62\x14\xfe\x6a\x8d\x45\x4f\xc4\xf9\xab\xdf\x77\x5c\xe6\xc7\xcd\x25\xac\x73\x82\x2d\xe9\x88\xac\x95\xb2\x2e\xa5\x73\x66\x92\xcf\x21\xce\x78\x5b\xad\xea\xe3\xc6\xb4\xa5\x56\x1d\x78\xc7\x0f\xc8\x94\x74\x7b\x90\xdc\xbc\xbb\x01\xee\x83\x38\x8c\xe5\x32\xe7\x3a\x47\xa5\xb2\x7e\x64\x83\x23\xfc\x67\x21\x73\xd5\x19\xb8\x7e\x5b\xeb\x76\x9a\x41\xc9\x77\x84\x80\xf6\x6e\x09\x5b\x92\x6e\x14\x86\xe8\x4a\x91\x95\x6e\x53\x75\xe5\x01\x1d\xfd\xa0\x09\x86\xb8\xce\x4a\xe7\xfc\x17\x02\x32\x16\xfc\x71\x1d\xaf\xbe\x5c\x21\x15\x92\xfd\x13\x00\x00\xff\xff\x4a\xe5\x21\xdc\x55\x06\x00\x00") func testImagesSampleApiserverMakefileBytes() ([]byte, error) { return bindataRead( @@ 
-8969,125 +9032,128 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "test/conformance/testdata/OWNERS": testConformanceTestdataOwners, - "test/conformance/testdata/conformance.yaml": testConformanceTestdataConformanceYaml, - "test/conformance/testdata/ineligible_endpoints.yaml": testConformanceTestdataIneligible_endpointsYaml, - "test/e2e/testing-manifests/flexvolume/attachable-with-long-mount": testE2eTestingManifestsFlexvolumeAttachableWithLongMount, - "test/e2e/testing-manifests/flexvolume/dummy": testE2eTestingManifestsFlexvolumeDummy, - "test/e2e/testing-manifests/flexvolume/dummy-attachable": testE2eTestingManifestsFlexvolumeDummyAttachable, - "test/e2e/testing-manifests/guestbook/agnhost-primary-deployment.yaml.in": testE2eTestingManifestsGuestbookAgnhostPrimaryDeploymentYamlIn, - "test/e2e/testing-manifests/guestbook/agnhost-primary-service.yaml": testE2eTestingManifestsGuestbookAgnhostPrimaryServiceYaml, - "test/e2e/testing-manifests/guestbook/agnhost-replica-deployment.yaml.in": testE2eTestingManifestsGuestbookAgnhostReplicaDeploymentYamlIn, - "test/e2e/testing-manifests/guestbook/agnhost-replica-service.yaml": testE2eTestingManifestsGuestbookAgnhostReplicaServiceYaml, - "test/e2e/testing-manifests/guestbook/frontend-deployment.yaml.in": testE2eTestingManifestsGuestbookFrontendDeploymentYamlIn, - "test/e2e/testing-manifests/guestbook/frontend-service.yaml": testE2eTestingManifestsGuestbookFrontendServiceYaml, - "test/e2e/testing-manifests/guestbook/legacy/frontend-controller.yaml": testE2eTestingManifestsGuestbookLegacyFrontendControllerYaml, - "test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml": testE2eTestingManifestsGuestbookLegacyRedisMasterControllerYaml, - "test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml": testE2eTestingManifestsGuestbookLegacyRedisSlaveControllerYaml, - 
"test/e2e/testing-manifests/guestbook/redis-master-deployment.yaml.in": testE2eTestingManifestsGuestbookRedisMasterDeploymentYamlIn, - "test/e2e/testing-manifests/guestbook/redis-master-service.yaml": testE2eTestingManifestsGuestbookRedisMasterServiceYaml, - "test/e2e/testing-manifests/guestbook/redis-slave-deployment.yaml.in": testE2eTestingManifestsGuestbookRedisSlaveDeploymentYamlIn, - "test/e2e/testing-manifests/guestbook/redis-slave-service.yaml": testE2eTestingManifestsGuestbookRedisSlaveServiceYaml, - "test/e2e/testing-manifests/ingress/gce/static-ip-2/ing.yaml": testE2eTestingManifestsIngressGceStaticIp2IngYaml, - "test/e2e/testing-manifests/ingress/gce/static-ip-2/rc.yaml": testE2eTestingManifestsIngressGceStaticIp2RcYaml, - "test/e2e/testing-manifests/ingress/gce/static-ip-2/svc.yaml": testE2eTestingManifestsIngressGceStaticIp2SvcYaml, - "test/e2e/testing-manifests/ingress/http/ing.yaml": testE2eTestingManifestsIngressHttpIngYaml, - "test/e2e/testing-manifests/ingress/http/rc.yaml": testE2eTestingManifestsIngressHttpRcYaml, - "test/e2e/testing-manifests/ingress/http/svc.yaml": testE2eTestingManifestsIngressHttpSvcYaml, - "test/e2e/testing-manifests/ingress/http2/ing.yaml": testE2eTestingManifestsIngressHttp2IngYaml, - "test/e2e/testing-manifests/ingress/http2/rc.yaml": testE2eTestingManifestsIngressHttp2RcYaml, - "test/e2e/testing-manifests/ingress/http2/svc.yaml": testE2eTestingManifestsIngressHttp2SvcYaml, - "test/e2e/testing-manifests/ingress/multiple-certs/ing.yaml": testE2eTestingManifestsIngressMultipleCertsIngYaml, - "test/e2e/testing-manifests/ingress/multiple-certs/rc.yaml": testE2eTestingManifestsIngressMultipleCertsRcYaml, - "test/e2e/testing-manifests/ingress/multiple-certs/svc.yaml": testE2eTestingManifestsIngressMultipleCertsSvcYaml, - "test/e2e/testing-manifests/ingress/neg/ing.yaml": testE2eTestingManifestsIngressNegIngYaml, - "test/e2e/testing-manifests/ingress/neg/rc.yaml": testE2eTestingManifestsIngressNegRcYaml, - 
"test/e2e/testing-manifests/ingress/neg/svc.yaml": testE2eTestingManifestsIngressNegSvcYaml, - "test/e2e/testing-manifests/ingress/neg-clusterip/ing.yaml": testE2eTestingManifestsIngressNegClusteripIngYaml, - "test/e2e/testing-manifests/ingress/neg-clusterip/rc.yaml": testE2eTestingManifestsIngressNegClusteripRcYaml, - "test/e2e/testing-manifests/ingress/neg-clusterip/svc.yaml": testE2eTestingManifestsIngressNegClusteripSvcYaml, - "test/e2e/testing-manifests/ingress/neg-exposed/ing.yaml": testE2eTestingManifestsIngressNegExposedIngYaml, - "test/e2e/testing-manifests/ingress/neg-exposed/rc.yaml": testE2eTestingManifestsIngressNegExposedRcYaml, - "test/e2e/testing-manifests/ingress/neg-exposed/svc.yaml": testE2eTestingManifestsIngressNegExposedSvcYaml, - "test/e2e/testing-manifests/ingress/nginx/rc.yaml": testE2eTestingManifestsIngressNginxRcYaml, - "test/e2e/testing-manifests/ingress/pre-shared-cert/ing.yaml": testE2eTestingManifestsIngressPreSharedCertIngYaml, - "test/e2e/testing-manifests/ingress/pre-shared-cert/rc.yaml": testE2eTestingManifestsIngressPreSharedCertRcYaml, - "test/e2e/testing-manifests/ingress/pre-shared-cert/svc.yaml": testE2eTestingManifestsIngressPreSharedCertSvcYaml, - "test/e2e/testing-manifests/ingress/static-ip/ing.yaml": testE2eTestingManifestsIngressStaticIpIngYaml, - "test/e2e/testing-manifests/ingress/static-ip/rc.yaml": testE2eTestingManifestsIngressStaticIpRcYaml, - "test/e2e/testing-manifests/ingress/static-ip/secret.yaml": testE2eTestingManifestsIngressStaticIpSecretYaml, - "test/e2e/testing-manifests/ingress/static-ip/svc.yaml": testE2eTestingManifestsIngressStaticIpSvcYaml, - "test/e2e/testing-manifests/kubectl/agnhost-primary-controller.json.in": testE2eTestingManifestsKubectlAgnhostPrimaryControllerJsonIn, - "test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml": testE2eTestingManifestsKubectlAgnhostPrimaryPodYaml, - "test/e2e/testing-manifests/kubectl/agnhost-primary-service.json": 
testE2eTestingManifestsKubectlAgnhostPrimaryServiceJson, - "test/e2e/testing-manifests/kubectl/busybox-cronjob.yaml.in": testE2eTestingManifestsKubectlBusyboxCronjobYamlIn, - "test/e2e/testing-manifests/kubectl/busybox-pod.yaml.in": testE2eTestingManifestsKubectlBusyboxPodYamlIn, - "test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment1YamlIn, - "test/e2e/testing-manifests/kubectl/httpd-deployment2.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment2YamlIn, - "test/e2e/testing-manifests/kubectl/httpd-deployment3.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment3YamlIn, - "test/e2e/testing-manifests/kubectl/httpd-rc.yaml.in": testE2eTestingManifestsKubectlHttpdRcYamlIn, - "test/e2e/testing-manifests/kubectl/pause-pod.yaml.in": testE2eTestingManifestsKubectlPausePodYamlIn, - "test/e2e/testing-manifests/kubectl/pod-with-readiness-probe.yaml.in": testE2eTestingManifestsKubectlPodWithReadinessProbeYamlIn, - "test/e2e/testing-manifests/pod": testE2eTestingManifestsPod, - "test/e2e/testing-manifests/rbd-storage-class.yaml": testE2eTestingManifestsRbdStorageClassYaml, - "test/e2e/testing-manifests/sample-device-plugin.yaml": testE2eTestingManifestsSampleDevicePluginYaml, - "test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml": testE2eTestingManifestsSchedulingNvidiaDriverInstallerYaml, - "test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml": testE2eTestingManifestsServiceloadbalancerHaproxyrcYaml, - "test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml": testE2eTestingManifestsServiceloadbalancerNetexecrcYaml, - "test/e2e/testing-manifests/serviceloadbalancer/netexecsvc.yaml": testE2eTestingManifestsServiceloadbalancerNetexecsvcYaml, - "test/e2e/testing-manifests/serviceloadbalancer/nginxrc.yaml": testE2eTestingManifestsServiceloadbalancerNginxrcYaml, - "test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml": testE2eTestingManifestsServiceloadbalancerNginxsvcYaml, - 
"test/e2e/testing-manifests/statefulset/cassandra/controller.yaml": testE2eTestingManifestsStatefulsetCassandraControllerYaml, - "test/e2e/testing-manifests/statefulset/cassandra/pdb.yaml": testE2eTestingManifestsStatefulsetCassandraPdbYaml, - "test/e2e/testing-manifests/statefulset/cassandra/service.yaml": testE2eTestingManifestsStatefulsetCassandraServiceYaml, - "test/e2e/testing-manifests/statefulset/cassandra/statefulset.yaml": testE2eTestingManifestsStatefulsetCassandraStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/cassandra/tester.yaml": testE2eTestingManifestsStatefulsetCassandraTesterYaml, - "test/e2e/testing-manifests/statefulset/cockroachdb/service.yaml": testE2eTestingManifestsStatefulsetCockroachdbServiceYaml, - "test/e2e/testing-manifests/statefulset/cockroachdb/statefulset.yaml": testE2eTestingManifestsStatefulsetCockroachdbStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/etcd/pdb.yaml": testE2eTestingManifestsStatefulsetEtcdPdbYaml, - "test/e2e/testing-manifests/statefulset/etcd/service.yaml": testE2eTestingManifestsStatefulsetEtcdServiceYaml, - "test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml": testE2eTestingManifestsStatefulsetEtcdStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/etcd/tester.yaml": testE2eTestingManifestsStatefulsetEtcdTesterYaml, - "test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml": testE2eTestingManifestsStatefulsetMysqlGaleraServiceYaml, - "test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml": testE2eTestingManifestsStatefulsetMysqlGaleraStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/mysql-upgrade/configmap.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeConfigmapYaml, - "test/e2e/testing-manifests/statefulset/mysql-upgrade/service.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeServiceYaml, - "test/e2e/testing-manifests/statefulset/mysql-upgrade/statefulset.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeStatefulsetYaml, - 
"test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeTesterYaml, - "test/e2e/testing-manifests/statefulset/nginx/service.yaml": testE2eTestingManifestsStatefulsetNginxServiceYaml, - "test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml": testE2eTestingManifestsStatefulsetNginxStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/redis/service.yaml": testE2eTestingManifestsStatefulsetRedisServiceYaml, - "test/e2e/testing-manifests/statefulset/redis/statefulset.yaml": testE2eTestingManifestsStatefulsetRedisStatefulsetYaml, - "test/e2e/testing-manifests/statefulset/zookeeper/service.yaml": testE2eTestingManifestsStatefulsetZookeeperServiceYaml, - "test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml": testE2eTestingManifestsStatefulsetZookeeperStatefulsetYaml, - "test/e2e/testing-manifests/storage-csi/OWNERS": testE2eTestingManifestsStorageCsiOwners, - "test/e2e/testing-manifests/storage-csi/controller-role.yaml": testE2eTestingManifestsStorageCsiControllerRoleYaml, - "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml": testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml, - "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml": testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml, - "test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml": testE2eTestingManifestsStorageCsiExternalResizerRbacYaml, - "test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml": testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYaml, - "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml": testE2eTestingManifestsStorageCsiGcePdController_ssYaml, - "test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml": testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYaml, - "test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml": testE2eTestingManifestsStorageCsiGcePdNode_dsYaml, - 
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathAttacherYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathDriverinfoYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-resizer.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml, - "test/e2e/testing-manifests/storage-csi/hostpath/usage/csi-storageclass.yaml": testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverAttacherYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverResizerYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverSnapshotterYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverinfoYaml, - 
"test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml": testE2eTestingManifestsStorageCsiMockCsiMockProxyYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml": testE2eTestingManifestsStorageCsiMockCsiMockRbacYaml, - "test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml": testE2eTestingManifestsStorageCsiMockCsiStorageclassYaml, - "test/e2e_node/testing-manifests/sriovdp-cm.yaml": testE2e_nodeTestingManifestsSriovdpCmYaml, - "test/e2e_node/testing-manifests/sriovdp-ds.yaml": testE2e_nodeTestingManifestsSriovdpDsYaml, - "test/e2e_node/testing-manifests/sriovdp-sa.yaml": testE2e_nodeTestingManifestsSriovdpSaYaml, + "test/conformance/testdata/OWNERS": testConformanceTestdataOwners, + "test/conformance/testdata/conformance.yaml": testConformanceTestdataConformanceYaml, + "test/conformance/testdata/ineligible_endpoints.yaml": testConformanceTestdataIneligible_endpointsYaml, + "test/e2e/testing-manifests/flexvolume/attachable-with-long-mount": testE2eTestingManifestsFlexvolumeAttachableWithLongMount, + "test/e2e/testing-manifests/flexvolume/dummy": testE2eTestingManifestsFlexvolumeDummy, + "test/e2e/testing-manifests/flexvolume/dummy-attachable": testE2eTestingManifestsFlexvolumeDummyAttachable, + "test/e2e/testing-manifests/guestbook/agnhost-primary-deployment.yaml.in": testE2eTestingManifestsGuestbookAgnhostPrimaryDeploymentYamlIn, + "test/e2e/testing-manifests/guestbook/agnhost-primary-service.yaml": testE2eTestingManifestsGuestbookAgnhostPrimaryServiceYaml, + "test/e2e/testing-manifests/guestbook/agnhost-replica-deployment.yaml.in": testE2eTestingManifestsGuestbookAgnhostReplicaDeploymentYamlIn, + "test/e2e/testing-manifests/guestbook/agnhost-replica-service.yaml": testE2eTestingManifestsGuestbookAgnhostReplicaServiceYaml, + "test/e2e/testing-manifests/guestbook/frontend-deployment.yaml.in": testE2eTestingManifestsGuestbookFrontendDeploymentYamlIn, + "test/e2e/testing-manifests/guestbook/frontend-service.yaml": 
testE2eTestingManifestsGuestbookFrontendServiceYaml, + "test/e2e/testing-manifests/guestbook/legacy/frontend-controller.yaml": testE2eTestingManifestsGuestbookLegacyFrontendControllerYaml, + "test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml": testE2eTestingManifestsGuestbookLegacyRedisMasterControllerYaml, + "test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml": testE2eTestingManifestsGuestbookLegacyRedisSlaveControllerYaml, + "test/e2e/testing-manifests/guestbook/redis-master-deployment.yaml.in": testE2eTestingManifestsGuestbookRedisMasterDeploymentYamlIn, + "test/e2e/testing-manifests/guestbook/redis-master-service.yaml": testE2eTestingManifestsGuestbookRedisMasterServiceYaml, + "test/e2e/testing-manifests/guestbook/redis-slave-deployment.yaml.in": testE2eTestingManifestsGuestbookRedisSlaveDeploymentYamlIn, + "test/e2e/testing-manifests/guestbook/redis-slave-service.yaml": testE2eTestingManifestsGuestbookRedisSlaveServiceYaml, + "test/e2e/testing-manifests/ingress/gce/static-ip-2/ing.yaml": testE2eTestingManifestsIngressGceStaticIp2IngYaml, + "test/e2e/testing-manifests/ingress/gce/static-ip-2/rc.yaml": testE2eTestingManifestsIngressGceStaticIp2RcYaml, + "test/e2e/testing-manifests/ingress/gce/static-ip-2/svc.yaml": testE2eTestingManifestsIngressGceStaticIp2SvcYaml, + "test/e2e/testing-manifests/ingress/http/ing.yaml": testE2eTestingManifestsIngressHttpIngYaml, + "test/e2e/testing-manifests/ingress/http/rc.yaml": testE2eTestingManifestsIngressHttpRcYaml, + "test/e2e/testing-manifests/ingress/http/svc.yaml": testE2eTestingManifestsIngressHttpSvcYaml, + "test/e2e/testing-manifests/ingress/http2/ing.yaml": testE2eTestingManifestsIngressHttp2IngYaml, + "test/e2e/testing-manifests/ingress/http2/rc.yaml": testE2eTestingManifestsIngressHttp2RcYaml, + "test/e2e/testing-manifests/ingress/http2/svc.yaml": testE2eTestingManifestsIngressHttp2SvcYaml, + "test/e2e/testing-manifests/ingress/multiple-certs/ing.yaml": 
testE2eTestingManifestsIngressMultipleCertsIngYaml, + "test/e2e/testing-manifests/ingress/multiple-certs/rc.yaml": testE2eTestingManifestsIngressMultipleCertsRcYaml, + "test/e2e/testing-manifests/ingress/multiple-certs/svc.yaml": testE2eTestingManifestsIngressMultipleCertsSvcYaml, + "test/e2e/testing-manifests/ingress/neg/ing.yaml": testE2eTestingManifestsIngressNegIngYaml, + "test/e2e/testing-manifests/ingress/neg/rc.yaml": testE2eTestingManifestsIngressNegRcYaml, + "test/e2e/testing-manifests/ingress/neg/svc.yaml": testE2eTestingManifestsIngressNegSvcYaml, + "test/e2e/testing-manifests/ingress/neg-clusterip/ing.yaml": testE2eTestingManifestsIngressNegClusteripIngYaml, + "test/e2e/testing-manifests/ingress/neg-clusterip/rc.yaml": testE2eTestingManifestsIngressNegClusteripRcYaml, + "test/e2e/testing-manifests/ingress/neg-clusterip/svc.yaml": testE2eTestingManifestsIngressNegClusteripSvcYaml, + "test/e2e/testing-manifests/ingress/neg-exposed/ing.yaml": testE2eTestingManifestsIngressNegExposedIngYaml, + "test/e2e/testing-manifests/ingress/neg-exposed/rc.yaml": testE2eTestingManifestsIngressNegExposedRcYaml, + "test/e2e/testing-manifests/ingress/neg-exposed/svc.yaml": testE2eTestingManifestsIngressNegExposedSvcYaml, + "test/e2e/testing-manifests/ingress/nginx/rc.yaml": testE2eTestingManifestsIngressNginxRcYaml, + "test/e2e/testing-manifests/ingress/pre-shared-cert/ing.yaml": testE2eTestingManifestsIngressPreSharedCertIngYaml, + "test/e2e/testing-manifests/ingress/pre-shared-cert/rc.yaml": testE2eTestingManifestsIngressPreSharedCertRcYaml, + "test/e2e/testing-manifests/ingress/pre-shared-cert/svc.yaml": testE2eTestingManifestsIngressPreSharedCertSvcYaml, + "test/e2e/testing-manifests/ingress/static-ip/ing.yaml": testE2eTestingManifestsIngressStaticIpIngYaml, + "test/e2e/testing-manifests/ingress/static-ip/rc.yaml": testE2eTestingManifestsIngressStaticIpRcYaml, + "test/e2e/testing-manifests/ingress/static-ip/secret.yaml": 
testE2eTestingManifestsIngressStaticIpSecretYaml, + "test/e2e/testing-manifests/ingress/static-ip/svc.yaml": testE2eTestingManifestsIngressStaticIpSvcYaml, + "test/e2e/testing-manifests/kubectl/agnhost-primary-controller.json.in": testE2eTestingManifestsKubectlAgnhostPrimaryControllerJsonIn, + "test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml": testE2eTestingManifestsKubectlAgnhostPrimaryPodYaml, + "test/e2e/testing-manifests/kubectl/agnhost-primary-service.json": testE2eTestingManifestsKubectlAgnhostPrimaryServiceJson, + "test/e2e/testing-manifests/kubectl/busybox-cronjob.yaml.in": testE2eTestingManifestsKubectlBusyboxCronjobYamlIn, + "test/e2e/testing-manifests/kubectl/busybox-pod.yaml.in": testE2eTestingManifestsKubectlBusyboxPodYamlIn, + "test/e2e/testing-manifests/kubectl/httpd-deployment1.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment1YamlIn, + "test/e2e/testing-manifests/kubectl/httpd-deployment2.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment2YamlIn, + "test/e2e/testing-manifests/kubectl/httpd-deployment3.yaml.in": testE2eTestingManifestsKubectlHttpdDeployment3YamlIn, + "test/e2e/testing-manifests/kubectl/httpd-rc.yaml.in": testE2eTestingManifestsKubectlHttpdRcYamlIn, + "test/e2e/testing-manifests/kubectl/pause-pod.yaml.in": testE2eTestingManifestsKubectlPausePodYamlIn, + "test/e2e/testing-manifests/kubectl/pod-with-readiness-probe.yaml.in": testE2eTestingManifestsKubectlPodWithReadinessProbeYamlIn, + "test/e2e/testing-manifests/pod": testE2eTestingManifestsPod, + "test/e2e/testing-manifests/rbd-storage-class.yaml": testE2eTestingManifestsRbdStorageClassYaml, + "test/e2e/testing-manifests/sample-device-plugin.yaml": testE2eTestingManifestsSampleDevicePluginYaml, + "test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml": testE2eTestingManifestsSchedulingNvidiaDriverInstallerYaml, + "test/e2e/testing-manifests/serviceloadbalancer/haproxyrc.yaml": testE2eTestingManifestsServiceloadbalancerHaproxyrcYaml, + 
"test/e2e/testing-manifests/serviceloadbalancer/netexecrc.yaml": testE2eTestingManifestsServiceloadbalancerNetexecrcYaml, + "test/e2e/testing-manifests/serviceloadbalancer/netexecsvc.yaml": testE2eTestingManifestsServiceloadbalancerNetexecsvcYaml, + "test/e2e/testing-manifests/serviceloadbalancer/nginxrc.yaml": testE2eTestingManifestsServiceloadbalancerNginxrcYaml, + "test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml": testE2eTestingManifestsServiceloadbalancerNginxsvcYaml, + "test/e2e/testing-manifests/statefulset/cassandra/controller.yaml": testE2eTestingManifestsStatefulsetCassandraControllerYaml, + "test/e2e/testing-manifests/statefulset/cassandra/pdb.yaml": testE2eTestingManifestsStatefulsetCassandraPdbYaml, + "test/e2e/testing-manifests/statefulset/cassandra/service.yaml": testE2eTestingManifestsStatefulsetCassandraServiceYaml, + "test/e2e/testing-manifests/statefulset/cassandra/statefulset.yaml": testE2eTestingManifestsStatefulsetCassandraStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/cassandra/tester.yaml": testE2eTestingManifestsStatefulsetCassandraTesterYaml, + "test/e2e/testing-manifests/statefulset/cockroachdb/service.yaml": testE2eTestingManifestsStatefulsetCockroachdbServiceYaml, + "test/e2e/testing-manifests/statefulset/cockroachdb/statefulset.yaml": testE2eTestingManifestsStatefulsetCockroachdbStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/etcd/pdb.yaml": testE2eTestingManifestsStatefulsetEtcdPdbYaml, + "test/e2e/testing-manifests/statefulset/etcd/service.yaml": testE2eTestingManifestsStatefulsetEtcdServiceYaml, + "test/e2e/testing-manifests/statefulset/etcd/statefulset.yaml": testE2eTestingManifestsStatefulsetEtcdStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/etcd/tester.yaml": testE2eTestingManifestsStatefulsetEtcdTesterYaml, + "test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml": testE2eTestingManifestsStatefulsetMysqlGaleraServiceYaml, + 
"test/e2e/testing-manifests/statefulset/mysql-galera/statefulset.yaml": testE2eTestingManifestsStatefulsetMysqlGaleraStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/mysql-upgrade/configmap.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeConfigmapYaml, + "test/e2e/testing-manifests/statefulset/mysql-upgrade/service.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeServiceYaml, + "test/e2e/testing-manifests/statefulset/mysql-upgrade/statefulset.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/mysql-upgrade/tester.yaml": testE2eTestingManifestsStatefulsetMysqlUpgradeTesterYaml, + "test/e2e/testing-manifests/statefulset/nginx/service.yaml": testE2eTestingManifestsStatefulsetNginxServiceYaml, + "test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml": testE2eTestingManifestsStatefulsetNginxStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/redis/service.yaml": testE2eTestingManifestsStatefulsetRedisServiceYaml, + "test/e2e/testing-manifests/statefulset/redis/statefulset.yaml": testE2eTestingManifestsStatefulsetRedisStatefulsetYaml, + "test/e2e/testing-manifests/statefulset/zookeeper/service.yaml": testE2eTestingManifestsStatefulsetZookeeperServiceYaml, + "test/e2e/testing-manifests/statefulset/zookeeper/statefulset.yaml": testE2eTestingManifestsStatefulsetZookeeperStatefulsetYaml, + "test/e2e/testing-manifests/storage-csi/OWNERS": testE2eTestingManifestsStorageCsiOwners, + "test/e2e/testing-manifests/storage-csi/controller-role.yaml": testE2eTestingManifestsStorageCsiControllerRoleYaml, + "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml": testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-agent/rbac.yaml": testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYaml, + 
"test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml": testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml": testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml": testE2eTestingManifestsStorageCsiExternalResizerRbacYaml, + "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml": testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYaml, + "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml": testE2eTestingManifestsStorageCsiGcePdController_ssYaml, + "test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml": testE2eTestingManifestsStorageCsiGcePdCsiControllerRbacYaml, + "test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml": testE2eTestingManifestsStorageCsiGcePdNode_dsYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathAttacherYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathDriverinfoYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathPluginYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-resizer.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml": 
testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYaml, + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml": testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverAttacherYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverResizerYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverSnapshotterYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml": testE2eTestingManifestsStorageCsiMockCsiMockDriverinfoYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml": testE2eTestingManifestsStorageCsiMockCsiMockProxyYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml": testE2eTestingManifestsStorageCsiMockCsiMockRbacYaml, + "test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml": testE2eTestingManifestsStorageCsiMockCsiStorageclassYaml, + "test/e2e/testing-manifests/storage-csi/update-hostpath.sh": testE2eTestingManifestsStorageCsiUpdateHostpathSh, + "test/e2e_node/testing-manifests/sriovdp-cm.yaml": testE2e_nodeTestingManifestsSriovdpCmYaml, + "test/e2e_node/testing-manifests/sriovdp-ds.yaml": testE2e_nodeTestingManifestsSriovdpDsYaml, + "test/e2e_node/testing-manifests/sriovdp-sa.yaml": testE2e_nodeTestingManifestsSriovdpSaYaml, "test/images/Makefile": testImagesMakefile, "test/images/OWNERS": testImagesOwners, "test/images/agnhost/.gitignore": 
testImagesAgnhostGitignore, @@ -9591,6 +9657,14 @@ var _bintree = &bintree{nil, map[string]*bintree{ "external-attacher": {nil, map[string]*bintree{ "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalAttacherRbacYaml, map[string]*bintree{}}, }}, + "external-health-monitor": {nil, map[string]*bintree{ + "external-health-monitor-agent": {nil, map[string]*bintree{ + "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorAgentRbacYaml, map[string]*bintree{}}, + }}, + "external-health-monitor-controller": {nil, map[string]*bintree{ + "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalHealthMonitorExternalHealthMonitorControllerRbacYaml, map[string]*bintree{}}, + }}, + }}, "external-provisioner": {nil, map[string]*bintree{ "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalProvisionerRbacYaml, map[string]*bintree{}}, }}, @@ -9598,7 +9672,9 @@ var _bintree = &bintree{nil, map[string]*bintree{ "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalResizerRbacYaml, map[string]*bintree{}}, }}, "external-snapshotter": {nil, map[string]*bintree{ - "rbac.yaml": {testE2eTestingManifestsStorageCsiExternalSnapshotterRbacYaml, map[string]*bintree{}}, + "csi-snapshotter": {nil, map[string]*bintree{ + "rbac-csi-snapshotter.yaml": {testE2eTestingManifestsStorageCsiExternalSnapshotterCsiSnapshotterRbacCsiSnapshotterYaml, map[string]*bintree{}}, + }}, }}, "gce-pd": {nil, map[string]*bintree{ "controller_ss.yaml": {testE2eTestingManifestsStorageCsiGcePdController_ssYaml, map[string]*bintree{}}, @@ -9613,11 +9689,9 @@ var _bintree = &bintree{nil, map[string]*bintree{ "csi-hostpath-provisioner.yaml": {testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathProvisionerYaml, map[string]*bintree{}}, "csi-hostpath-resizer.yaml": {testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathResizerYaml, map[string]*bintree{}}, "csi-hostpath-snapshotter.yaml": {testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathSnapshotterYaml, 
map[string]*bintree{}}, + "csi-hostpath-testing.yaml": {testE2eTestingManifestsStorageCsiHostpathHostpathCsiHostpathTestingYaml, map[string]*bintree{}}, "e2e-test-rbac.yaml": {testE2eTestingManifestsStorageCsiHostpathHostpathE2eTestRbacYaml, map[string]*bintree{}}, }}, - "usage": {nil, map[string]*bintree{ - "csi-storageclass.yaml": {testE2eTestingManifestsStorageCsiHostpathUsageCsiStorageclassYaml, map[string]*bintree{}}, - }}, }}, "mock": {nil, map[string]*bintree{ "csi-mock-driver-attacher.yaml": {testE2eTestingManifestsStorageCsiMockCsiMockDriverAttacherYaml, map[string]*bintree{}}, @@ -9629,6 +9703,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ "csi-mock-rbac.yaml": {testE2eTestingManifestsStorageCsiMockCsiMockRbacYaml, map[string]*bintree{}}, "csi-storageclass.yaml": {testE2eTestingManifestsStorageCsiMockCsiStorageclassYaml, map[string]*bintree{}}, }}, + "update-hostpath.sh": {testE2eTestingManifestsStorageCsiUpdateHostpathSh, map[string]*bintree{}}, }}, }}, }}, diff --git a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/metrics_grabber.go b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/metrics_grabber.go index 4ddede2d5d1b..0f77ba650a1d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/metrics_grabber.go +++ b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/metrics_grabber.go @@ -17,47 +17,34 @@ limitations under the License. 
package monitoring import ( - "context" + "errors" "fmt" - "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" - - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { f := framework.NewDefaultFramework("metrics-grabber") var c, ec clientset.Interface var grabber *e2emetrics.Grabber - var masterRegistered bool ginkgo.BeforeEach(func() { var err error c = f.ClientSet ec = f.KubemarkExternalClusterClientSet - // Check if master Node is registered - nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - framework.ExpectNoError(err) - for _, node := range nodes.Items { - if strings.HasSuffix(node.Name, "master") { - masterRegistered = true - } - } gomega.Eventually(func() error { - grabber, err = e2emetrics.NewMetricsGrabber(c, ec, true, true, true, true, true) + grabber, err = e2emetrics.NewMetricsGrabber(c, ec, f.ClientConfig(), true, true, true, true, true) if err != nil { return fmt.Errorf("failed to create metrics grabber: %v", err) } - if masterRegistered && !grabber.HasControlPlanePods() { - return fmt.Errorf("unable to get find control plane pods") - } return nil }, 5*time.Minute, 10*time.Second).Should(gomega.BeNil()) }) @@ -65,6 +52,9 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from API server.", func() { ginkgo.By("Connecting to /metrics endpoint") response, err := grabber.GrabFromAPIServer() + if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { + e2eskipper.Skipf("%v", err) + } framework.ExpectNoError(err) 
gomega.Expect(response).NotTo(gomega.BeEmpty()) }) @@ -72,6 +62,9 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from a Kubelet.", func() { ginkgo.By("Proxying to Node through the API server") node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { + e2eskipper.Skipf("%v", err) + } framework.ExpectNoError(err) response, err := grabber.GrabFromKubelet(node.Name) framework.ExpectNoError(err) @@ -80,22 +73,20 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() { ginkgo.It("should grab all metrics from a Scheduler.", func() { ginkgo.By("Proxying to Pod through the API server") - if !masterRegistered { - framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.") - return - } response, err := grabber.GrabFromScheduler() + if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { + e2eskipper.Skipf("%v", err) + } framework.ExpectNoError(err) gomega.Expect(response).NotTo(gomega.BeEmpty()) }) ginkgo.It("should grab all metrics from a ControllerManager.", func() { ginkgo.By("Proxying to Pod through the API server") - if !masterRegistered { - framework.Logf("Master is node api.Registry. 
Skipping testing ControllerManager metrics.") - return - } response, err := grabber.GrabFromControllerManager() + if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) { + e2eskipper.Skipf("%v", err) + } framework.ExpectNoError(err) gomega.Expect(response).NotTo(gomega.BeEmpty()) }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go b/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go index ef81ba83d108..5f0888607ce6 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/networking_perf.go @@ -39,8 +39,9 @@ import ( const ( // use this timeout for larger clusters largeClusterTimeout = 400 * time.Second - // iperf2BaselineBandwidthMegabytesPerSecond sets a baseline for iperf2 bandwidth of 90 MB/s - iperf2BaselineBandwidthMegabytesPerSecond = 90 + // iperf2BaselineBandwidthMegabytesPerSecond sets a baseline for iperf2 bandwidth of 10 MBps = 80 Mbps + // this limits is chosen in order to support small devices with 100 mbps cards. 
+ iperf2BaselineBandwidthMegabytesPerSecond = 10 // iperf2Port selects an arbitrary, unique port to run iperf2's client and server on iperf2Port = 6789 // labelKey is used as a key for selectors diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service.go b/vendor/k8s.io/kubernetes/test/e2e/network/service.go index cd1fbdb5684d..895ac8a7d14c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service.go @@ -2373,15 +2373,19 @@ var _ = common.SIGDescribe("Services", func() { framework.Logf("Service %s has service status updated", testSvcName) ginkgo.By("patching the service") - servicePatchPayload, err := json.Marshal(v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "test-service": "patched", + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + servicePatchPayload, err := json.Marshal(v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test-service": "patched", + }, }, - }, - }) + }) + framework.ExpectNoError(err, "failed to marshal patch for Service %v in namespace %v", testSvcName, ns) - _, err = svcClient.Patch(context.TODO(), testSvcName, types.StrategicMergePatchType, []byte(servicePatchPayload), metav1.PatchOptions{}) + _, err = svcClient.Patch(context.TODO(), testSvcName, types.StrategicMergePatchType, []byte(servicePatchPayload), metav1.PatchOptions{}) + return err + }) framework.ExpectNoError(err, "failed to patch service. 
%v", err) ginkgo.By("watching for the Service to be patched") diff --git a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go index 9a5696aa8fad..2fe6d895ca78 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go +++ b/vendor/k8s.io/kubernetes/test/e2e/scheduling/priorities.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "math" + "sync" "time" "github.com/onsi/ginkgo" @@ -33,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -527,6 +529,9 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n } } + errChan := make(chan error, len(nodes)) + var wg sync.WaitGroup + // we need the max one to keep the same cpu/mem use rate ratio = math.Max(maxCPUFraction, maxMemFraction) for _, node := range nodes { @@ -566,14 +571,27 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n }, }, } - - err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()), - *initPausePod(f, *podConfig), true, framework.Logf) - + wg.Add(1) + go func() { + defer wg.Done() + err := testutils.StartPods(cs, 1, ns, string(uuid.NewUUID()), + *initPausePod(f, *podConfig), true, framework.Logf) + if err != nil { + errChan <- err + } + }() + } + wg.Wait() + close(errChan) + var errs []error + for err := range errChan { if err != nil { - return cleanUp, err + errs = append(errs, err) } } + if len(errs) > 0 { + return cleanUp, errors.NewAggregate(errs) + } nodeNameToPodList = podListForEachNode(cs) for _, node := range nodes { @@ -590,9 +608,9 @@ func podListForEachNode(cs clientset.Interface) map[string][]*v1.Pod { if err != nil { framework.Failf("Expect error of invalid, got : %v", err) } - for _, pod := range 
allPods.Items { + for i, pod := range allPods.Items { nodeName := pod.Spec.NodeName - nodeNameToPodList[nodeName] = append(nodeNameToPodList[nodeName], &pod) + nodeNameToPodList[nodeName] = append(nodeNameToPodList[nodeName], &allPods.Items[i]) } return nodeNameToPodList } diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/service.go b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/service.go index 93edbf300ed4..3da7b22b6d63 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/service.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc/codes" "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/cache" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -247,7 +247,7 @@ func (s *service) findVolByID( func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { - ptime := ptypes.TimestampNow() + ptime := timestamppb.Now() return cache.Snapshot{ Name: name, Parameters: parameters, diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go index 2035c8d6a11c..f06fac8161b5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/csi.go @@ -47,7 +47,9 @@ import ( "github.com/onsi/ginkgo" "google.golang.org/grpc/codes" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -141,8 +143,14 @@ func InitHostPathCSIDriver() storageframework.TestDriver { storageframework.CapBlock: true, storageframework.CapPVCDataSource: true, storageframework.CapControllerExpansion: true, + 
storageframework.CapOnlineExpansion: true, storageframework.CapSingleNodeVolume: true, - storageframework.CapVolumeLimits: true, + + // This is needed for the + // testsuites/volumelimits.go `should support volume limits` + // test. --maxvolumespernode=10 gets + // added when patching the deployment. + storageframework.CapVolumeLimits: true, } return initHostPathCSIDriver("csi-hostpath", capabilities, @@ -152,7 +160,8 @@ func InitHostPathCSIDriver() storageframework.TestDriver { }, "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml", - "test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml", + "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml", "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml", "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml", @@ -220,16 +229,47 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageframewo } o := utils.PatchCSIOptions{ - OldDriverName: h.driverInfo.Name, - NewDriverName: config.GetUniqueDriverName(), - DriverContainerName: "hostpath", - DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()}, + OldDriverName: h.driverInfo.Name, + NewDriverName: config.GetUniqueDriverName(), + DriverContainerName: "hostpath", + DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName(), + // This is needed for the + // testsuites/volumelimits.go `should support volume limits` + // test. 
+ "--maxvolumespernode=10", + }, ProvisionerContainerName: "csi-provisioner", SnapshotterContainerName: "csi-snapshotter", NodeName: node.Name, } cleanup, err := utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error { - return utils.PatchCSIDeployment(config.Framework, o, item) + if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { + return err + } + + // Remove csi-external-health-monitor-agent and + // csi-external-health-monitor-controller + // containers. They are not needed for any of the + // tests and in practice apparently caused enough + // overhead that even unrelated tests timed out. For + // example, in the pull-kubernetes-e2e-kind test, 43 + // out of 5771 tests failed, including tests from + // sig-node, sig-cli, sig-api-machinery, sig-network. + switch item := item.(type) { + case *appsv1.StatefulSet: + var containers []v1.Container + for _, container := range item.Spec.Template.Spec.Containers { + switch container.Name { + case "csi-external-health-monitor-agent", "csi-external-health-monitor-controller": + // Remove these containers. + default: + // Keep the others. + containers = append(containers, container) + } + } + item.Spec.Template.Spec.Containers = containers + } + return nil }, h.manifests...) 
if err != nil { @@ -408,7 +448,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) MockCSITestDriver { "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml", - "test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml", "test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml", "test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml", } @@ -611,7 +651,25 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.P FSGroupPolicy: m.fsGroupPolicy, } cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error { - return utils.PatchCSIDeployment(f, o, item) + if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { + return err + } + + switch item := item.(type) { + case *rbacv1.ClusterRole: + if strings.HasPrefix(item.Name, "external-snapshotter-runner") { + // Re-enable access to secrets for the snapshotter sidecar for + // https://github.com/kubernetes/kubernetes/blob/6ede5ca95f78478fa627ecfea8136e0dff34436b/test/e2e/storage/csi_mock_volume.go#L1539-L1548 + // It was disabled in https://github.com/kubernetes-csi/external-snapshotter/blob/501cc505846c03ee665355132f2da0ce7d5d747d/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml#L26-L32 + item.Rules = append(item.Rules, rbacv1.PolicyRule{ + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"get", "list"}, + }) + } + } + + return nil }, m.manifests...) 
if err != nil { @@ -739,6 +797,7 @@ func InitGcePDCSIDriver() storageframework.TestDriver { storageframework.CapVolumeLimits: false, storageframework.CapTopology: true, storageframework.CapControllerExpansion: true, + storageframework.CapOnlineExpansion: true, storageframework.CapNodeExpansion: true, storageframework.CapSnapshotDataSource: true, }, diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/in_tree.go b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/in_tree.go index 787f2a396147..c2fc7d22f611 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/in_tree.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/in_tree.go @@ -1251,6 +1251,7 @@ func InitGcePdDriver() storageframework.TestDriver { storageframework.CapExec: true, storageframework.CapMultiPODs: true, storageframework.CapControllerExpansion: true, + storageframework.CapOnlineExpansion: true, storageframework.CapNodeExpansion: true, // GCE supports volume limits, but the test creates large // number of volumes and times out test suites. @@ -1692,6 +1693,7 @@ func InitAwsDriver() storageframework.TestDriver { storageframework.CapMultiPODs: true, storageframework.CapControllerExpansion: true, storageframework.CapNodeExpansion: true, + storageframework.CapOnlineExpansion: true, // AWS supports volume limits, but the test creates large // number of volumes and times out test suites. 
storageframework.CapVolumeLimits: false, diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/external/external.go b/vendor/k8s.io/kubernetes/test/e2e/storage/external/external.go index 80061031f8e4..be53fcb6306d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/external/external.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/external/external.go @@ -203,6 +203,14 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) { if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, driver); err != nil { return nil, errors.Wrap(err, filename) } + + // to ensure backward compatibility if controller expansion is enabled then set online expansion to true + if _, ok := driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion]; !ok && + driver.GetDriverInfo().Capabilities[storageframework.CapControllerExpansion] { + caps := driver.DriverInfo.Capabilities + caps[storageframework.CapOnlineExpansion] = true + driver.DriverInfo.Capabilities = caps + } return driver, nil } diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/snapshot_resource.go b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/snapshot_resource.go index 026dea2e0a4e..e904375abe68 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/snapshot_resource.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/snapshot_resource.go @@ -104,10 +104,13 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{}) framework.ExpectNoError(err) - ginkgo.By("recording the volume handle and snapshotHandle") + ginkgo.By("recording properties of the preprovisioned snapshot") snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string) - framework.Logf("Recording snapshot handle: %s", snapshotHandle) + framework.Logf("Recording snapshot content handle: %s", 
snapshotHandle) + snapshotContentAnnotations := r.Vscontent.GetAnnotations() + framework.Logf("Recording snapshot content annotations: %v", snapshotContentAnnotations) csiDriverName := r.Vsclass.Object["driver"].(string) + framework.Logf("Recording snapshot driver: %s", csiDriverName) // If the deletion policy is retain on vscontent: // when vs is deleted vscontent will not be deleted @@ -140,7 +143,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf snapName := getPreProvisionedSnapshotName(uuid) snapcontentName := getPreProvisionedSnapshotContentName(uuid) - r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName) + r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapshotContentAnnotations, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName) r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -299,13 +302,14 @@ func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstru return snapshot } -func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured { +func getPreProvisionedSnapshotContent(snapcontentName string, snapshotContentAnnotations map[string]string, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured { snapshotContent := &unstructured.Unstructured{ Object: map[string]interface{}{ "kind": "VolumeSnapshotContent", "apiVersion": utils.SnapshotAPIVersion, "metadata": map[string]interface{}{ - "name": snapcontentName, + "name": snapcontentName, + "annotations": snapshotContentAnnotations, }, "spec": map[string]interface{}{ "source": map[string]interface{}{ diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/testdriver.go b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/testdriver.go index 0ce0fd5456d9..1b198a45bb26 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/testdriver.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/testdriver.go @@ -17,13 +17,14 @@ limitations under the License. package framework import ( - "k8s.io/api/core/v1" + "time" + + v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" - "time" ) // TestDriver represents an interface for a driver to be tested in TestSuite. @@ -164,6 +165,7 @@ const ( CapRWX Capability = "RWX" // support ReadWriteMany access modes CapControllerExpansion Capability = "controllerExpansion" // support volume expansion for controller CapNodeExpansion Capability = "nodeExpansion" // support volume expansion for node + CapOnlineExpansion Capability = "onlineExpansion" // supports online volume expansion CapVolumeLimits Capability = "volumeLimits" // support volume limits (can be *very* slow) CapSingleNodeVolume Capability = "singleNodeVolume" // support volume that can run on single node (like hostpath) CapTopology Capability = "topology" // support topology diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go index 61a0202d5fcd..ae496b53ce65 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/persistent_volumes-local.go @@ -373,11 +373,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) ginkgo.It("should fail scheduling due to different NodeAffinity", func() { - testPodWithNodeConflict(config, volumeType, conflictNodeName, makeLocalPodWithNodeAffinity, 
immediateMode) + testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeAffinity) }) ginkgo.It("should fail scheduling due to different NodeSelector", func() { - testPodWithNodeConflict(config, volumeType, conflictNodeName, makeLocalPodWithNodeSelector, immediateMode) + testPodWithNodeConflict(config, testVol, conflictNodeName, makeLocalPodWithNodeSelector) }) }) @@ -720,10 +720,8 @@ func deletePodAndPVCs(config *localTestConfig, pod *v1.Pod) error { type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nodeName string) *v1.Pod -func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) { - ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVolType)) - testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, bindingMode) - testVol := testVols[0] +func testPodWithNodeConflict(config *localTestConfig, testVol *localTestVolume, nodeName string, makeLocalPodFunc makeLocalPodWith) { + ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVol.localVolumeType)) pod := makeLocalPodFunc(config, testVol, nodeName) pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/regional_pd.go b/vendor/k8s.io/kubernetes/test/e2e/storage/regional_pd.go index c92560c21b65..e2cace65ddb4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/regional_pd.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/regional_pd.go @@ -154,14 +154,14 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, for _, test := range tests { test.Client = c - test.Class = newStorageClass(test, ns, "" /* suffix */) + computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "" /* suffix */)) + defer clearStorageClass() + test.Class = 
computedStorageClass test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, StorageClassName: &(test.Class.Name), VolumeMode: &test.VolumeMode, }, ns) - _, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class) - defer clearStorageClass() test.TestDynamicProvisioning() } @@ -343,7 +343,10 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) } suffix := "delayed-regional" - test.Class = newStorageClass(test, ns, suffix) + + computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) + defer clearStorageClass() + test.Class = computedStorageClass var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -381,7 +384,9 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { suffix := "topo-regional" test.Client = c - test.Class = newStorageClass(test, ns, suffix) + computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) + defer clearStorageClass() + test.Class = computedStorageClass zones := getTwoRandomZones(c) addAllowedTopologiesToStorageClass(c, test.Class, zones) test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -391,9 +396,6 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { VolumeMode: &test.VolumeMode, }, ns) - _, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class) - defer clearStorageClass() - pv := test.TestDynamicProvisioning() checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true) } @@ -413,7 +415,9 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s } suffix := "topo-delayed-regional" - test.Class = newStorageClass(test, ns, suffix) + computedStorageClass, clearStorageClass := 
testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) + defer clearStorageClass() + test.Class = computedStorageClass topoZones := getTwoRandomZones(c) addAllowedTopologiesToStorageClass(c, test.Class, topoZones) var claims []*v1.PersistentVolumeClaim diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go index 4eb34ca89153..c5911db9e6b0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/base.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/component-base/metrics/testutil" csitrans "k8s.io/csi-translation-lib" "k8s.io/kubernetes/test/e2e/framework" @@ -43,6 +44,7 @@ type opCounts map[string]int64 // migrationOpCheck validates migrated metrics. type migrationOpCheck struct { cs clientset.Interface + config *rest.Config pluginName string skipCheck bool @@ -100,14 +102,14 @@ func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) op return totOps } -func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts { +func getVolumeOpCounts(c clientset.Interface, config *rest.Config, pluginName string) opCounts { if !framework.ProviderIs("gce", "gke", "aws") { return opCounts{} } nodeLimit := 25 - metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false) + metricsGrabber, err := e2emetrics.NewMetricsGrabber(c, nil, config, true, false, true, false, false) if err != nil { framework.ExpectNoError(err, "Error creating metrics grabber: %v", err) @@ -156,7 +158,7 @@ func addOpCounts(o1 opCounts, o2 opCounts) opCounts { return totOps } -func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCounts, opCounts) { +func getMigrationVolumeOpCounts(cs clientset.Interface, 
config *rest.Config, pluginName string) (opCounts, opCounts) { if len(pluginName) > 0 { var migratedOps opCounts l := csitrans.New() @@ -166,18 +168,19 @@ func getMigrationVolumeOpCounts(cs clientset.Interface, pluginName string) (opCo migratedOps = opCounts{} } else { csiName = "kubernetes.io/csi:" + csiName - migratedOps = getVolumeOpCounts(cs, csiName) + migratedOps = getVolumeOpCounts(cs, config, csiName) } - return getVolumeOpCounts(cs, pluginName), migratedOps + return getVolumeOpCounts(cs, config, pluginName), migratedOps } // Not an in-tree driver framework.Logf("Test running for native CSI Driver, not checking metrics") return opCounts{}, opCounts{} } -func newMigrationOpCheck(cs clientset.Interface, pluginName string) *migrationOpCheck { +func newMigrationOpCheck(cs clientset.Interface, config *rest.Config, pluginName string) *migrationOpCheck { moc := migrationOpCheck{ cs: cs, + config: config, pluginName: pluginName, } if len(pluginName) == 0 { @@ -206,7 +209,7 @@ func newMigrationOpCheck(cs clientset.Interface, pluginName string) *migrationOp moc.skipCheck = true return &moc } - moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(cs, pluginName) + moc.oldInTreeOps, moc.oldMigratedOps = getMigrationVolumeOpCounts(cs, config, pluginName) return &moc } @@ -215,7 +218,7 @@ func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() { return } - newInTreeOps, _ := getMigrationVolumeOpCounts(moc.cs, moc.pluginName) + newInTreeOps, _ := getMigrationVolumeOpCounts(moc.cs, moc.config, moc.pluginName) for op, count := range newInTreeOps { if count != moc.oldInTreeOps[op] { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go index 520fec4465c5..a9fe04fcdb9a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/multivolume.go @@ -111,7 +111,7 @@ func (t 
*multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } cleanup := func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go index 5bc00446459b..bedb0a573a5e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/provisioning.go @@ -134,7 +134,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) l.cs = l.config.Framework.ClientSet testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange @@ -596,14 +596,6 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P framework.ExpectNotEqual(len(claims), 0) namespace := claims[0].Namespace - ginkgo.By("creating a storage class " + t.Class.Name) - class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class, metav1.CreateOptions{}) - framework.ExpectNoError(err) - defer func() { - err = storageutils.DeleteStorageClass(t.Client, class.Name) - framework.ExpectNoError(err, "While deleting storage class") - }() - ginkgo.By("creating claims") var claimNames []string var createdClaims []*v1.PersistentVolumeClaim diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go index af969b2ae45b..ab8ed97ea005 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/snapshottable.go @@ -317,11 +317,15 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy") - // Delete both Snapshot and PVC at the same time because different storage systems - // have different ordering of deletion. Some may require delete PVC first before + // Delete both Snapshot and restored Pod/PVC at the same time because different storage systems + // have different ordering of deletion. Some may require delete the restored PVC first before // Snapshot deletion and some are opposite. err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName()) framework.ExpectNoError(err) + framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name) + err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err) + framework.Logf("deleting restored PVC %q/%q", restoredPVC.Namespace, restoredPVC.Name) err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go index bb853aa3383f..b0ebd70fd603 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/subpath.go @@ -122,7 +122,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte // Now do the more expensive test initialization. 
l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) l.hostExec = utils.NewHostExec(f) @@ -346,6 +346,11 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte init() defer cleanup() + if strings.HasPrefix(driverName, "hostPath") { + // TODO: This skip should be removed once #61446 is fixed + e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName) + } + testSubpathReconstruction(f, l.hostExec, l.pod, false) }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go index ac0f826f415c..1d24ffdb6a0b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/topology.go @@ -148,7 +148,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt StorageClassName: &(l.resource.Sc.Name), }, l.config.Framework.Namespace.Name) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) return l } diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go index 472982c7e130..704f4bd2aa30 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_expand.go @@ -121,7 +121,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, // Now do the more 
expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) } @@ -247,6 +247,10 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, init() defer cleanup() + if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] { + e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name) + } + var err error ginkgo.By("Creating a pod with dynamically provisioned volume") podConfig := e2epod.Config{ diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go index 23aa911e2de4..ad4c627d66fe 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_io.go @@ -117,7 +117,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt // Now do the more expensive test initialization. 
l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_stress.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_stress.go index 5741f50fe9aa..289c9d154ded 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_stress.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_stress.go @@ -120,7 +120,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) l.volumes = []*storageframework.VolumeResource{} l.pods = []*v1.Pod{} l.testOptions = *dInfo.StressTestOptions diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go index 48df518457ce..65115aa36791 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go @@ -115,7 +115,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa // Now do the more expensive test initialization. 
l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } // manualInit initializes l.VolumeResource without creating the PV & PVC objects. diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go index ab9799497f50..cef61af2c3e4 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volumes.go @@ -135,7 +135,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte // Now do the more expensive test initialization. l.config, l.driverCleanup = driver.PrepareTest(f) - l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName) + l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) if l.resource.VolSource == nil { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/ubernetes_lite_volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/ubernetes_lite_volumes.go index 9cc20d7aa4b7..5aa27c7850d7 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/ubernetes_lite_volumes.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/ubernetes_lite_volumes.go @@ -19,20 +19,13 @@ package storage import ( "context" "fmt" - "strconv" - "github.com/onsi/ginkgo" - compute "google.golang.org/api/compute/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2enode 
"k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - "k8s.io/kubernetes/test/e2e/framework/providers/gce" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -57,130 +50,8 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() { ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() { PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image) }) - - ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() { - OnlyAllowNodeZones(f, zoneCount, image) - }) }) -// OnlyAllowNodeZones tests that PDs are only provisioned in zones with nodes. -func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { - gceCloud, err := gce.GetGCECloud() - framework.ExpectNoError(err) - - // Get all the zones that the nodes are in - expectedZones, err := gceCloud.GetAllZonesFromCloudProvider() - framework.ExpectNoError(err) - framework.Logf("Expected zones: %v", expectedZones) - - // Get all the zones in this current region - region := gceCloud.Region() - allZonesInRegion, err := gceCloud.ListZonesInRegion(region) - framework.ExpectNoError(err) - - var extraZone string - for _, zone := range allZonesInRegion { - if !expectedZones.Has(zone.Name) { - extraZone = zone.Name - break - } - } - - if extraZone == "" { - e2eskipper.Skipf("All zones in region %s have compute instances, no extra zones available", region) - } - - ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone)) - project := framework.TestContext.CloudConfig.ProjectID - zone := extraZone - myuuid := string(uuid.NewUUID()) - name := "compute-" + myuuid - imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606" - - rb := &compute.Instance{ - MachineType: "zones/" + zone + "/machineTypes/f1-micro", - Disks: 
[]*compute.AttachedDisk{ - { - AutoDelete: true, - Boot: true, - Type: "PERSISTENT", - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskName: "my-root-pd-" + myuuid, - SourceImage: imageURL, - }, - }, - }, - NetworkInterfaces: []*compute.NetworkInterface{ - { - AccessConfigs: []*compute.AccessConfig{ - { - Type: "ONE_TO_ONE_NAT", - Name: "External NAT", - }, - }, - Network: "/global/networks/default", - }, - }, - Name: name, - } - - err = gceCloud.InsertInstance(project, zone, rb) - framework.ExpectNoError(err) - - defer func() { - // Teardown of the compute instance - framework.Logf("Deleting compute resource: %v", name) - err := gceCloud.DeleteInstance(project, zone, name) - framework.ExpectNoError(err) - }() - - ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes") - // Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1 - // This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees" - var pvcList []*v1.PersistentVolumeClaim - c := f.ClientSet - ns := f.Namespace.Name - - for index := 1; index <= zoneCount+1; index++ { - pvc := newNamedDefaultClaim(ns, index) - pvc, err = e2epv.CreatePVC(c, ns, pvc) - framework.ExpectNoError(err) - pvcList = append(pvcList, pvc) - - // Defer the cleanup - defer func() { - framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) - err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) - if err != nil { - framework.Failf("Error deleting claim %q. 
Error: %v", pvc.Name, err) - } - }() - } - - // Wait for all claims bound - for _, claim := range pvcList { - err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) - framework.ExpectNoError(err) - } - - pvZones := sets.NewString() - ginkgo.By("Checking that PDs have been provisioned in only the expected zones") - for _, claim := range pvcList { - // Get a new copy of the claim to have all fields populated - claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) - framework.ExpectNoError(err) - - // Get the related PV - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) - framework.ExpectNoError(err) - - pvZone, ok := pv.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone] - framework.ExpectEqual(ok, true, "PV has no LabelZone to be found") - pvZones.Insert(pvZone) - } - framework.ExpectEqual(pvZones.Equal(expectedZones), true, fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones)) -} - // Return the number of zones in which we have nodes in this cluster. 
func getZoneCount(c clientset.Interface) (int, error) { zoneNames, err := e2enode.GetClusterZones(c) @@ -261,24 +132,3 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) framework.ExpectNoError(err) } } - -func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim { - claim := v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-" + strconv.Itoa(index), - Namespace: ns, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"), - }, - }, - }, - } - - return &claim -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go index 330939eac65c..4404f9cae810 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_metrics.go @@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { VolumeMode: &blockMode, }, ns) - metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, true, false, true, false, false) + metricsGrabber, err = e2emetrics.NewMetricsGrabber(c, nil, f.ClientConfig(), true, false, true, false, false) if err != nil { framework.Failf("Error creating metrics grabber : %v", err) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go index 9b01c741879c..ac0eacf3f84d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/volume_provisioning.go @@ -825,6 +825,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.It("should report an error and create no PV", func() { e2eskipper.SkipUnlessProviderIs("aws") test := testsuites.StorageClassTest{ + Client: c, Name: "AWS EBS 
with invalid KMS key", Provisioner: "kubernetes.io/aws-ebs", Timeouts: f.Timeouts, diff --git a/vendor/k8s.io/kubernetes/test/e2e/suites.go b/vendor/k8s.io/kubernetes/test/e2e/suites.go index d14da6dfe91d..ec170e84dc8d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/suites.go +++ b/vendor/k8s.io/kubernetes/test/e2e/suites.go @@ -22,6 +22,7 @@ import ( "path" "time" + "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" ) @@ -55,13 +56,17 @@ func AfterSuiteActions() { func gatherTestSuiteMetrics() error { framework.Logf("Gathering metrics") - c, err := framework.LoadClientset() + config, err := framework.LoadConfig() if err != nil { - return fmt.Errorf("error loading client: %v", err) + return fmt.Errorf("error loading client config: %v", err) + } + c, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating client: %v", err) } // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). 
- grabber, err := e2emetrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics) + grabber, err := e2emetrics.NewMetricsGrabber(c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics) if err != nil { return fmt.Errorf("failed to create MetricsGrabber: %v", err) } diff --git a/vendor/k8s.io/kubernetes/test/e2e/upgrades/upgrade_suite.go b/vendor/k8s.io/kubernetes/test/e2e/upgrades/upgrade_suite.go index 89f1a9068423..eac3b0f5a668 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/upgrades/upgrade_suite.go +++ b/vendor/k8s.io/kubernetes/test/e2e/upgrades/upgrade_suite.go @@ -95,7 +95,7 @@ func FinalizeUpgradeTest(start time.Time, tc *junit.TestCase) { } } -func createUpgradeFrameworks(tests []Test) map[string]*framework.Framework { +func CreateUpgradeFrameworks(tests []Test) map[string]*framework.Framework { nsFilter := regexp.MustCompile("[^[:word:]-]+") // match anything that's not a word character or hyphen testFrameworks := map[string]*framework.Framework{} for _, t := range tests { @@ -110,12 +110,11 @@ func createUpgradeFrameworks(tests []Test) map[string]*framework.Framework { func RunUpgradeSuite( upgCtx *UpgradeContext, tests []Test, + testFrameworks map[string]*framework.Framework, testSuite *junit.TestSuite, upgradeType UpgradeType, upgradeFunc func(), ) { - testFrameworks := createUpgradeFrameworks(tests) - cm := chaosmonkey.New(upgradeFunc) for _, t := range tests { testCase := &junit.TestCase{ diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 66348582dd53..2b77d94eeeb7 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -30,18 +30,19 @@ import ( // RegistryList holds public and private image registries type RegistryList struct { - GcAuthenticatedRegistry 
string `yaml:"gcAuthenticatedRegistry"` - E2eRegistry string `yaml:"e2eRegistry"` - PromoterE2eRegistry string `yaml:"promoterE2eRegistry"` - BuildImageRegistry string `yaml:"buildImageRegistry"` - InvalidRegistry string `yaml:"invalidRegistry"` - GcEtcdRegistry string `yaml:"gcEtcdRegistry"` - GcRegistry string `yaml:"gcRegistry"` - SigStorageRegistry string `yaml:"sigStorageRegistry"` - GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"` - PrivateRegistry string `yaml:"privateRegistry"` - SampleRegistry string `yaml:"sampleRegistry"` - MicrosoftRegistry string `yaml:"microsoftRegistry"` + GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"` + E2eRegistry string `yaml:"e2eRegistry"` + PromoterE2eRegistry string `yaml:"promoterE2eRegistry"` + BuildImageRegistry string `yaml:"buildImageRegistry"` + InvalidRegistry string `yaml:"invalidRegistry"` + GcEtcdRegistry string `yaml:"gcEtcdRegistry"` + GcRegistry string `yaml:"gcRegistry"` + SigStorageRegistry string `yaml:"sigStorageRegistry"` + GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"` + PrivateRegistry string `yaml:"privateRegistry"` + SampleRegistry string `yaml:"sampleRegistry"` + MicrosoftRegistry string `yaml:"microsoftRegistry"` + CloudProviderGcpRegistry string `yaml:"cloudProviderGcpRegistry"` } // Config holds an images registry, name, and version @@ -68,18 +69,19 @@ func (i *Config) SetVersion(version string) { func initReg() RegistryList { registry := RegistryList{ - GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", - E2eRegistry: "gcr.io/kubernetes-e2e-test-images", - PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images", - BuildImageRegistry: "k8s.gcr.io/build-image", - InvalidRegistry: "invalid.com/invalid", - GcEtcdRegistry: "k8s.gcr.io", - GcRegistry: "k8s.gcr.io", - SigStorageRegistry: "k8s.gcr.io/sig-storage", - PrivateRegistry: "gcr.io/k8s-authenticated-test", - SampleRegistry: "gcr.io/google-samples", - GcrReleaseRegistry: "gcr.io/gke-release", - MicrosoftRegistry: 
"mcr.microsoft.com", + GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling", + E2eRegistry: "gcr.io/kubernetes-e2e-test-images", + PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images", + BuildImageRegistry: "k8s.gcr.io/build-image", + InvalidRegistry: "invalid.com/invalid", + GcEtcdRegistry: "k8s.gcr.io", + GcRegistry: "k8s.gcr.io", + SigStorageRegistry: "k8s.gcr.io/sig-storage", + PrivateRegistry: "gcr.io/k8s-authenticated-test", + SampleRegistry: "gcr.io/google-samples", + GcrReleaseRegistry: "gcr.io/gke-release", + MicrosoftRegistry: "mcr.microsoft.com", + CloudProviderGcpRegistry: "k8s.gcr.io/cloud-provider-gcp", } repoList := os.Getenv("KUBE_TEST_REPO_LIST") if repoList == "" { @@ -105,18 +107,19 @@ var ( PrivateRegistry = registry.PrivateRegistry // Preconfigured image configs - dockerLibraryRegistry = "docker.io/library" - e2eRegistry = registry.E2eRegistry - promoterE2eRegistry = registry.PromoterE2eRegistry - buildImageRegistry = registry.BuildImageRegistry - gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry - gcEtcdRegistry = registry.GcEtcdRegistry - gcRegistry = registry.GcRegistry - sigStorageRegistry = registry.SigStorageRegistry - gcrReleaseRegistry = registry.GcrReleaseRegistry - invalidRegistry = registry.InvalidRegistry - sampleRegistry = registry.SampleRegistry - microsoftRegistry = registry.MicrosoftRegistry + dockerLibraryRegistry = "docker.io/library" + e2eRegistry = registry.E2eRegistry + promoterE2eRegistry = registry.PromoterE2eRegistry + buildImageRegistry = registry.BuildImageRegistry + gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry + gcEtcdRegistry = registry.GcEtcdRegistry + gcRegistry = registry.GcRegistry + sigStorageRegistry = registry.SigStorageRegistry + gcrReleaseRegistry = registry.GcrReleaseRegistry + invalidRegistry = registry.InvalidRegistry + sampleRegistry = registry.SampleRegistry + microsoftRegistry = registry.MicrosoftRegistry + cloudProviderGcpRegistry = registry.CloudProviderGcpRegistry 
imageConfigs, originalImageConfigs = initImageConfigs() ) @@ -223,7 +226,7 @@ func initImageConfigs() (map[int]Config, map[int]Config) { configs[CheckMetadataConcealment] = Config{promoterE2eRegistry, "metadata-concealment", "1.6"} configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{promoterE2eRegistry, "cuda-vector-add", "2.2"} - configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.6.0"} + configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.6.5"} configs[EchoServer] = Config{promoterE2eRegistry, "echoserver", "2.3"} configs[Etcd] = Config{gcEtcdRegistry, "etcd", "3.4.13-0"} configs[GlusterDynamicProvisioner] = Config{promoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"} @@ -401,6 +404,8 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) { registryAndUser = gcrReleaseRegistry case "docker.io/library": registryAndUser = dockerLibraryRegistry + case "k8s.gcr.io/cloud-provider-gcp": + registryAndUser = cloudProviderGcpRegistry default: if countParts == 1 { // We assume we found an image from docker hub library diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE index 74487567632c..6a66aea5eafe 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws.go index 3588f38cb4e0..aafc7fe05644 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws.go @@ -35,7 +35,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/endpoints" @@ -821,8 +820,11 @@ func (p *awsSDKProvider) Compute(regionName string) (EC2, error) { } awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true). WithEndpointResolver(p.cfg.getResolver()) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *awsConfig, + SharedConfigState: session.SharedConfigEnable, + }) - sess, err := session.NewSession(awsConfig) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } @@ -843,8 +845,10 @@ func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) { } awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true). WithEndpointResolver(p.cfg.getResolver()) - - sess, err := session.NewSession(awsConfig) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *awsConfig, + SharedConfigState: session.SharedConfigEnable, + }) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } @@ -861,8 +865,10 @@ func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) { } awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true). 
WithEndpointResolver(p.cfg.getResolver()) - - sess, err := session.NewSession(awsConfig) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *awsConfig, + SharedConfigState: session.SharedConfigEnable, + }) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } @@ -880,8 +886,10 @@ func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) { } awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true). WithEndpointResolver(p.cfg.getResolver()) - - sess, err := session.NewSession(awsConfig) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *awsConfig, + SharedConfigState: session.SharedConfigEnable, + }) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } @@ -911,8 +919,10 @@ func (p *awsSDKProvider) KeyManagement(regionName string) (KMS, error) { } awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true). WithEndpointResolver(p.cfg.getResolver()) - - sess, err := session.NewSession(awsConfig) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: *awsConfig, + SharedConfigState: session.SharedConfigEnable, + }) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } @@ -1170,30 +1180,28 @@ func init() { return nil, fmt.Errorf("unable to validate custom endpoint overrides: %v", err) } - sess, err := session.NewSession(&aws.Config{}) + sess, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{}, + SharedConfigState: session.SharedConfigEnable, + }) if err != nil { return nil, fmt.Errorf("unable to initialize AWS session: %v", err) } - var provider credentials.Provider - if cfg.Global.RoleARN == "" { - provider = &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.New(sess), - } - } else { + var creds *credentials.Credentials + if cfg.Global.RoleARN != "" { klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) - provider = &stscreds.AssumeRoleProvider{ + provider := 
&stscreds.AssumeRoleProvider{ Client: sts.New(sess), RoleARN: cfg.Global.RoleARN, } - } - creds := credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.EnvProvider{}, - provider, - &credentials.SharedCredentialsProvider{}, - }) + creds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + provider, + }) + } aws := newAWSSDKProvider(creds, cfg) return newAWSCloud(*cfg, aws) diff --git a/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go index 0fa011580458..3d59abc8879a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/aws/aws_loadbalancer.go @@ -65,6 +65,10 @@ var ( defaultNlbHealthCheckThreshold = int64(3) defaultHealthCheckPort = "traffic-port" defaultHealthCheckPath = "/" + + // Defaults for ELB Target operations + defaultRegisterTargetsChunkSize = 100 + defaultDeregisterTargetsChunkSize = 100 ) func isNLB(annotations map[string]string) bool { @@ -563,6 +567,7 @@ func (c *Cloud) deleteListenerV2(listener *elbv2.Listener) error { // ensureTargetGroup creates a target group with a set of instances. 
func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName types.NamespacedName, mapping nlbPortMapping, instances []string, vpcID string, tags map[string]string) (*elbv2.TargetGroup, error) { dirty := false + expectedTargets := c.computeTargetGroupExpectedTargets(instances, mapping.TrafficPort) if targetGroup == nil { targetType := "instance" name := c.buildTargetGroupName(serviceName, mapping.FrontendPort, mapping.TrafficPort, mapping.TrafficProtocol, targetType, mapping) @@ -609,86 +614,23 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty } } - registerInput := &elbv2.RegisterTargetsInput{ - TargetGroupArn: result.TargetGroups[0].TargetGroupArn, - Targets: []*elbv2.TargetDescription{}, - } - for _, instanceID := range instances { - registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{ - Id: aws.String(string(instanceID)), - Port: aws.Int64(mapping.TrafficPort), - }) - } - - _, err = c.elbv2.RegisterTargets(registerInput) - if err != nil { - return nil, fmt.Errorf("error registering targets for load balancer: %q", err) + tg := result.TargetGroups[0] + tgARN := aws.StringValue(tg.TargetGroupArn) + if err := c.ensureTargetGroupTargets(tgARN, expectedTargets, nil); err != nil { + return nil, err } - - return result.TargetGroups[0], nil + return tg, nil } // handle instances in service { - healthResponse, err := c.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn}) + tgARN := aws.StringValue(targetGroup.TargetGroupArn) + actualTargets, err := c.obtainTargetGroupActualTargets(tgARN) if err != nil { - return nil, fmt.Errorf("error describing target group health: %q", err) - } - actualIDs := []string{} - for _, healthDescription := range healthResponse.TargetHealthDescriptions { - if aws.StringValue(healthDescription.TargetHealth.State) == elbv2.TargetHealthStateEnumHealthy { - actualIDs = append(actualIDs, *healthDescription.Target.Id) - } 
else if healthDescription.TargetHealth.Reason != nil { - switch aws.StringValue(healthDescription.TargetHealth.Reason) { - case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress: - // We don't need to count this instance in service if it is - // on its way out - default: - actualIDs = append(actualIDs, *healthDescription.Target.Id) - } - } - } - - actual := sets.NewString(actualIDs...) - expected := sets.NewString(instances...) - - additions := expected.Difference(actual) - removals := actual.Difference(expected) - - if len(additions) > 0 { - registerInput := &elbv2.RegisterTargetsInput{ - TargetGroupArn: targetGroup.TargetGroupArn, - Targets: []*elbv2.TargetDescription{}, - } - for instanceID := range additions { - registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{ - Id: aws.String(instanceID), - Port: aws.Int64(mapping.TrafficPort), - }) - } - _, err := c.elbv2.RegisterTargets(registerInput) - if err != nil { - return nil, fmt.Errorf("error registering new targets in target group: %q", err) - } - dirty = true + return nil, err } - - if len(removals) > 0 { - deregisterInput := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: targetGroup.TargetGroupArn, - Targets: []*elbv2.TargetDescription{}, - } - for instanceID := range removals { - deregisterInput.Targets = append(deregisterInput.Targets, &elbv2.TargetDescription{ - Id: aws.String(instanceID), - Port: aws.Int64(mapping.TrafficPort), - }) - } - _, err := c.elbv2.DeregisterTargets(deregisterInput) - if err != nil { - return nil, fmt.Errorf("error trying to deregister targets in target group: %q", err) - } - dirty = true + if err := c.ensureTargetGroupTargets(tgARN, expectedTargets, actualTargets); err != nil { + return nil, err } } @@ -738,6 +680,101 @@ func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, serviceName ty return targetGroup, nil } +func (c *Cloud) ensureTargetGroupTargets(tgARN string, expectedTargets []*elbv2.TargetDescription, actualTargets 
[]*elbv2.TargetDescription) error { + targetsToRegister, targetsToDeregister := c.diffTargetGroupTargets(expectedTargets, actualTargets) + if len(targetsToRegister) > 0 { + targetsToRegisterChunks := c.chunkTargetDescriptions(targetsToRegister, defaultRegisterTargetsChunkSize) + for _, targetsChunk := range targetsToRegisterChunks { + req := &elbv2.RegisterTargetsInput{ + TargetGroupArn: aws.String(tgARN), + Targets: targetsChunk, + } + if _, err := c.elbv2.RegisterTargets(req); err != nil { + return fmt.Errorf("error trying to register targets in target group: %q", err) + } + } + } + if len(targetsToDeregister) > 0 { + targetsToDeregisterChunks := c.chunkTargetDescriptions(targetsToDeregister, defaultDeregisterTargetsChunkSize) + for _, targetsChunk := range targetsToDeregisterChunks { + req := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(tgARN), + Targets: targetsChunk, + } + if _, err := c.elbv2.DeregisterTargets(req); err != nil { + return fmt.Errorf("error trying to deregister targets in target group: %q", err) + } + } + } + return nil +} + +func (c *Cloud) computeTargetGroupExpectedTargets(instanceIDs []string, port int64) []*elbv2.TargetDescription { + expectedTargets := make([]*elbv2.TargetDescription, 0, len(instanceIDs)) + for _, instanceID := range instanceIDs { + expectedTargets = append(expectedTargets, &elbv2.TargetDescription{ + Id: aws.String(instanceID), + Port: aws.Int64(port), + }) + } + return expectedTargets +} + +func (c *Cloud) obtainTargetGroupActualTargets(tgARN string) ([]*elbv2.TargetDescription, error) { + req := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(tgARN), + } + resp, err := c.elbv2.DescribeTargetHealth(req) + if err != nil { + return nil, fmt.Errorf("error describing target group health: %q", err) + } + actualTargets := make([]*elbv2.TargetDescription, 0, len(resp.TargetHealthDescriptions)) + for _, targetDesc := range resp.TargetHealthDescriptions { + if targetDesc.TargetHealth.Reason != nil && 
aws.StringValue(targetDesc.TargetHealth.Reason) == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { + continue + } + actualTargets = append(actualTargets, targetDesc.Target) + } + return actualTargets, nil +} + +// diffTargetGroupTargets computes the targets to register and targets to deregister based on existingTargets and desired instances. +func (c *Cloud) diffTargetGroupTargets(expectedTargets []*elbv2.TargetDescription, actualTargets []*elbv2.TargetDescription) (targetsToRegister []*elbv2.TargetDescription, targetsToDeregister []*elbv2.TargetDescription) { + expectedTargetsByUID := make(map[string]*elbv2.TargetDescription, len(expectedTargets)) + for _, target := range expectedTargets { + targetUID := fmt.Sprintf("%v:%v", aws.StringValue(target.Id), aws.Int64Value(target.Port)) + expectedTargetsByUID[targetUID] = target + } + actualTargetsByUID := make(map[string]*elbv2.TargetDescription, len(actualTargets)) + for _, target := range actualTargets { + targetUID := fmt.Sprintf("%v:%v", aws.StringValue(target.Id), aws.Int64Value(target.Port)) + actualTargetsByUID[targetUID] = target + } + + expectedTargetsUIDs := sets.StringKeySet(expectedTargetsByUID) + actualTargetsUIDs := sets.StringKeySet(actualTargetsByUID) + for _, targetUID := range expectedTargetsUIDs.Difference(actualTargetsUIDs).List() { + targetsToRegister = append(targetsToRegister, expectedTargetsByUID[targetUID]) + } + for _, targetUID := range actualTargetsUIDs.Difference(expectedTargetsUIDs).List() { + targetsToDeregister = append(targetsToDeregister, actualTargetsByUID[targetUID]) + } + return targetsToRegister, targetsToDeregister +} + +// chunkTargetDescriptions will split slice of TargetDescription into chunks +func (c *Cloud) chunkTargetDescriptions(targets []*elbv2.TargetDescription, chunkSize int) [][]*elbv2.TargetDescription { + var chunks [][]*elbv2.TargetDescription + for i := 0; i < len(targets); i += chunkSize { + end := i + chunkSize + if end > len(targets) { + end = 
len(targets) + } + chunks = append(chunks, targets[i:end]) + } + return chunks +} + // updateInstanceSecurityGroupsForNLB will adjust securityGroup's settings to allow inbound traffic into instances from clientCIDRs and portMappings. // TIP: if either instances or clientCIDRs or portMappings are nil, then the securityGroup rules for lbName are cleared. func (c *Cloud) updateInstanceSecurityGroupsForNLB(lbName string, instances map[InstanceID]*ec2.Instance, subnetCIDRs []string, clientCIDRs []string, portMappings []nlbPortMapping) error { diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 2d36a5c2bcd5..dbc9f593b501 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -274,6 +274,8 @@ type Cloud struct { ipv6DualStackEnabled bool // Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes. nodeCachesLock sync.RWMutex + // nodeNames holds current nodes for tracking added nodes in VM caches. + nodeNames sets.String // nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone // it is updated by the nodeInformer nodeZones map[string]sets.String @@ -342,6 +344,7 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) { } az := &Cloud{ + nodeNames: sets.NewString(), nodeZones: map[string]sets.String{}, nodeResourceGroups: map[string]string{}, unmanagedNodes: sets.NewString(), @@ -782,6 +785,9 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { defer az.nodeCachesLock.Unlock() if prevNode != nil { + // Remove from nodeNames cache. + az.nodeNames.Delete(prevNode.ObjectMeta.Name) + // Remove from nodeZones cache. 
prevZone, ok := prevNode.ObjectMeta.Labels[LabelFailureDomainBetaZone] if ok && az.isAvailabilityZone(prevZone) { @@ -805,6 +811,9 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { } if newNode != nil { + // Add to nodeNames cache. + az.nodeNames.Insert(newNode.ObjectMeta.Name) + // Add to nodeZones cache. newZone, ok := newNode.ObjectMeta.Labels[LabelFailureDomainBetaZone] if ok && az.isAvailabilityZone(newZone) { @@ -876,6 +885,22 @@ func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) { return az.ResourceGroup, nil } +// GetNodeNames returns a set of all node names in the k8s cluster. +func (az *Cloud) GetNodeNames() (sets.String, error) { + // Kubelet won't set az.nodeInformerSynced, return nil. + if az.nodeInformerSynced == nil { + return nil, nil + } + + az.nodeCachesLock.RLock() + defer az.nodeCachesLock.RUnlock() + if !az.nodeInformerSynced() { + return nil, fmt.Errorf("node informer is not synced when trying to GetNodeNames") + } + + return sets.NewString(az.nodeNames.List()...), nil +} + // GetResourceGroups returns a set of resource groups that all nodes are running on. func (az *Cloud) GetResourceGroups() (sets.String, error) { // Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup. 
diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index 446b0d4192d5..bee2150aba05 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -453,5 +453,8 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc func isInstanceNotFoundError(err error) bool { errMsg := strings.ToLower(err.Error()) + if strings.Contains(errMsg, strings.ToLower(vmssVMNotActiveErrorMessage)) { + return true + } return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds) } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go index 912ab10748f4..2717ce35c7f0 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -20,6 +20,7 @@ package azure import ( "context" + "errors" "fmt" "os" "strings" @@ -29,6 +30,8 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" azcache "k8s.io/legacy-cloud-providers/azure/cache" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" ) const ( @@ -233,10 +236,22 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) + klog.V(3).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) + + provisioningState, err := az.VMSet.GetProvisioningStateByNodeName(string(nodeName)) + if err != nil { + // Returns false, so the controller manager will continue to check InstanceExistsByProviderID(). 
+ if errors.Is(err, cloudprovider.InstanceNotFound) { + return false, nil + } + + return false, err + } + klog.V(3).Infof("InstanceShutdownByProviderID gets provisioning state %q for node %q", provisioningState, nodeName) status := strings.ToLower(powerStatus) - return status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating, nil + provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(compute.ProvisioningStateSucceeded))) + return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil } func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) { diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 2c1c4dfaa3f2..cdd421f646f1 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -20,6 +20,7 @@ package azure import ( "context" + "errors" "fmt" "math" "reflect" @@ -309,7 +310,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, ipConf := (*bp.BackendIPConfigurations)[i] ipConfigID := to.String(ipConf.ID) _, vmSetName, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfigID) - if err != nil { + if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { return nil, err } primaryVMSetName := az.VMSet.GetPrimaryVMSetName() @@ -1134,13 +1135,10 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, ipConf := range *bp.BackendIPConfigurations { ipConfID := to.String(ipConf.ID) nodeName, _, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfID) - if err != nil { + if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { return nil, err } - if nodeName == "" { - // VM may under deletion 
- continue - } + // If a node is not supposed to be included in the LB, it // would not be in the `nodes` slice. We need to check the nodes that // have been added to the LB's backendpool, find the unwanted ones and @@ -1258,13 +1256,6 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // construct FrontendIPConfigurationPropertiesFormat var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat if isInternal { - // azure does not support ILB for IPv6 yet. - // TODO: remove this check when ILB supports IPv6 *and* the SDK - // have been rev'ed to 2019* version - if utilnet.IsIPv6String(service.Spec.ClusterIP) { - return nil, fmt.Errorf("ensure(%s): lb(%s) - internal load balancers does not support IPv6", serviceName, lbName) - } - subnetName := subnet(service) if subnetName == nil { subnetName = &az.SubnetName @@ -1282,6 +1273,10 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, Subnet: &subnet, } + if utilnet.IsIPv6String(service.Spec.ClusterIP) { + configProperties.PrivateIPAddressVersion = network.IPv6 + } + loadBalancerIP := service.Spec.LoadBalancerIP if loadBalancerIP != "" { configProperties.PrivateIPAllocationMethod = network.Static @@ -1869,18 +1864,18 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix) sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) if !sharedRuleFound { - klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) - return nil, fmt.Errorf("expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) + klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name) + continue } if sharedRule.DestinationAddressPrefixes == nil { - klog.V(4).Infof("Expected to have array 
of destinations in shared rule for service %s being deleted, but did not", service.Name) - return nil, fmt.Errorf("expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) + klog.V(4).Infof("Didn't find DestinationAddressPrefixes in shared rule for service %s", service.Name) + continue } existingPrefixes := *sharedRule.DestinationAddressPrefixes addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) if !found { - klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) - return nil, fmt.Errorf("expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) + klog.V(4).Infof("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name) + continue } if len(existingPrefixes) == 1 { updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...) 
@@ -2142,15 +2137,13 @@ func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lb // ensurePIPTagged ensures the public IP of the service is tagged as configured func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddress) bool { - changed := false configTags := parseTags(az.Tags) annotationTags := make(map[string]*string) if _, ok := service.Annotations[ServiceAnnotationAzurePIPTags]; ok { annotationTags = parseTags(service.Annotations[ServiceAnnotationAzurePIPTags]) } - for k, v := range annotationTags { - configTags[k] = v - } + configTags, _ = reconcileTags(configTags, annotationTags) + // include the cluster name and service names tags when comparing var clusterName, serviceNames *string if v, ok := pip.Tags[clusterNameKey]; ok { @@ -2165,12 +2158,10 @@ func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddre if serviceNames != nil { configTags[serviceTagKey] = serviceNames } - for k, v := range configTags { - if vv, ok := pip.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { - pip.Tags[k] = v - changed = true - } - } + + tags, changed := reconcileTags(pip.Tags, configTags) + pip.Tags = tags + return changed } @@ -2238,9 +2229,11 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa } dirtyPIP = true } - changed := az.ensurePIPTagged(service, &pip) - if changed { - dirtyPIP = true + if !isUserAssignedPIP { + changed := az.ensurePIPTagged(service, &pip) + if changed { + dirtyPIP = true + } } if shouldReleaseExistingOwnedPublicIP(&pip, wantLb, isInternal, isUserAssignedPIP, desiredPipName, serviceIPTagRequest) { // Then, release the public ip @@ -2435,7 +2428,7 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) { continue } - if existingRule.Protocol != rule.Protocol { + if !strings.EqualFold(string(existingRule.Protocol), 
string(rule.Protocol)) { continue } if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) { @@ -2452,10 +2445,10 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b continue } } - if existingRule.Access != rule.Access { + if !strings.EqualFold(string(existingRule.Access), string(rule.Access)) { continue } - if existingRule.Direction != rule.Direction { + if !strings.EqualFold(string(existingRule.Direction), string(rule.Direction)) { continue } return true @@ -2729,7 +2722,6 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, serviceName string) erro // ensureLoadBalancerTagged ensures every load balancer in the resource group is tagged as configured func (az *Cloud) ensureLoadBalancerTagged(lb *network.LoadBalancer) bool { - changed := false if az.Tags == "" { return false } @@ -2737,18 +2729,15 @@ func (az *Cloud) ensureLoadBalancerTagged(lb *network.LoadBalancer) bool { if lb.Tags == nil { lb.Tags = make(map[string]*string) } - for k, v := range tags { - if vv, ok := lb.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { - lb.Tags[k] = v - changed = true - } - } + + tags, changed := reconcileTags(lb.Tags, tags) + lb.Tags = tags + return changed } // ensureSecurityGroupTagged ensures the security group is tagged as configured func (az *Cloud) ensureSecurityGroupTagged(sg *network.SecurityGroup) bool { - changed := false if az.Tags == "" { return false } @@ -2756,11 +2745,9 @@ func (az *Cloud) ensureSecurityGroupTagged(sg *network.SecurityGroup) bool { if sg.Tags == nil { sg.Tags = make(map[string]*string) } - for k, v := range tags { - if vv, ok := sg.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { - sg.Tags[k] = v - changed = true - } - } + + tags, changed := reconcileTags(sg.Tags, tags) + sg.Tags = tags + return changed } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go 
b/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index e93401eb5049..17742af77cfb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -21,6 +21,7 @@ package azure import ( "context" "fmt" + "net/http" "path" "strconv" "strings" @@ -236,6 +237,10 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { rerr = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName) if rerr != nil { + if rerr.HTTPStatusCode == http.StatusNotFound { + klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI) + return nil + } return rerr.Error() } // We don't need poll here, k8s will immediately stop referencing the disk diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go index 7807dbcb0a8f..bb61f9c519d3 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_routes.go @@ -149,12 +149,17 @@ func (d *delayedRouteUpdater) updateRoutes() { } // reconcile routes. - dirty := false + dirty, onlyUpdateTags := false, true routes := []network.Route{} if routeTable.Routes != nil { routes = *routeTable.Routes } - onlyUpdateTags := true + + routes, dirty = d.cleanupOutdatedRoutes(routes) + if dirty { + onlyUpdateTags = false + } + for _, rt := range d.routesToUpdate { if rt.operation == routeTableOperationUpdateTags { routeTable.Tags = rt.routeTableTags @@ -204,6 +209,34 @@ func (d *delayedRouteUpdater) updateRoutes() { } } +// cleanupOutdatedRoutes deletes all non-dualstack routes when dualstack is enabled, +// and deletes all dualstack routes when dualstack is not enabled. 
+func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []network.Route) (routes []network.Route, changed bool) { + for i := len(existingRoutes) - 1; i >= 0; i-- { + existingRouteName := to.String(existingRoutes[i].Name) + split := strings.Split(existingRouteName, routeNameSeparator) + + // filter out unmanaged routes + deleteRoute := false + if d.az.nodeNames.Has(split[0]) { + if d.az.ipv6DualStackEnabled && len(split) == 1 { + klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated non-dualstack route %s", existingRouteName) + deleteRoute = true + } else if !d.az.ipv6DualStackEnabled && len(split) == 2 { + klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated dualstack route %s", existingRouteName) + deleteRoute = true + } + + if deleteRoute { + existingRoutes = append(existingRoutes[:i], existingRoutes[i+1:]...) + changed = true + } + } + } + + return existingRoutes, changed +} + // addRouteOperation adds the routeOperation to delayedRouteUpdater and returns a delayedRouteOperation. 
func (d *delayedRouteUpdater) addRouteOperation(operation routeOperation, route network.Route) (*delayedRouteOperation, error) { d.lock.Lock() @@ -508,16 +541,13 @@ func (az *Cloud) ensureRouteTableTagged(rt *network.RouteTable) (map[string]*str if az.Tags == "" { return nil, false } - changed := false tags := parseTags(az.Tags) if rt.Tags == nil { rt.Tags = make(map[string]*string) } - for k, v := range tags { - if vv, ok := rt.Tags[k]; !ok || !strings.EqualFold(to.String(v), to.String(vv)) { - rt.Tags[k] = v - changed = true - } - } + + tags, changed := reconcileTags(rt.Tags, tags) + rt.Tags = tags + return rt.Tags, changed } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go index 6f9fdb6dc931..a8ee19b10dac 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -365,9 +365,13 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv klog.Warningf("serviceOwnsFrontendIP: unexpected error when finding match public IP of the service %s with loadBalancerLP %s: %v", service.Name, loadBalancerIP, err) return false, isPrimaryService, nil } - - if pip != nil && pip.ID != nil && pip.PublicIPAddressPropertiesFormat != nil && pip.IPAddress != nil { - if strings.EqualFold(*pip.ID, *fip.PublicIPAddress.ID) { + if pip != nil && + pip.ID != nil && + pip.PublicIPAddressPropertiesFormat != nil && + pip.IPAddress != nil && + fip.FrontendIPConfigurationPropertiesFormat != nil && + fip.FrontendIPConfigurationPropertiesFormat.PublicIPAddress != nil { + if strings.EqualFold(to.String(pip.ID), to.String(fip.PublicIPAddress.ID)) { klog.V(4).Infof("serviceOwnsFrontendIP: found secondary service %s of the frontend IP config %s", service.Name, *fip.Name) return true, isPrimaryService, nil @@ -377,7 +381,7 @@ func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, serv 
return false, isPrimaryService, nil } - return false, isPrimaryService, fmt.Errorf("serviceOwnsFrontendIP: wrong parameters") + return false, isPrimaryService, nil } // for internal secondary service the private IP address on the frontend IP config should be checked @@ -454,6 +458,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) machine, err = as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeUnsafe) if err == cloudprovider.InstanceNotFound { + klog.Warningf("Unable to find node %s: %v", name, cloudprovider.InstanceNotFound) return "", cloudprovider.InstanceNotFound } if err != nil { @@ -500,6 +505,20 @@ func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState str return vmPowerStateStopped, nil } +// GetProvisioningStateByNodeName returns the provisioningState for the specified node. +func (as *availabilitySet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) { + vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeDefault) + if err != nil { + return provisioningState, err + } + + if vm.VirtualMachineProperties == nil || vm.VirtualMachineProperties.ProvisioningState == nil { + return provisioningState, nil + } + + return to.String(vm.VirtualMachineProperties.ProvisioningState), nil +} + // GetNodeNameByProviderID gets the node name by provider ID. func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { // NodeName is part of providerID for standard instances. 
@@ -947,13 +966,16 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend } } nicUpdaters := make([]func() error, 0) - errors := make([]error, 0) + allErrs := make([]error, 0) for i := range ipConfigurationIDs { ipConfigurationID := ipConfigurationIDs[i] nodeName, _, err := as.GetNodeNameByIPConfigurationID(ipConfigurationID) - if err != nil { + if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err) - errors = append(errors, err) + allErrs = append(allErrs, err) + continue + } + if nodeName == "" { continue } @@ -1020,9 +1042,9 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend if errs != nil { return utilerrors.Flatten(errs) } - // Fail if there are other errors. - if len(errors) > 0 { - return utilerrors.Flatten(utilerrors.NewAggregate(errors)) + // Fail if there are other errors. + if len(allErrs) > 0 { + return utilerrors.Flatten(utilerrors.NewAggregate(allErrs)) } isOperationSucceeded = true @@ -1030,6 +1052,11 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend } func getAvailabilitySetNameByID(asID string) (string, error) { + // for standalone VM + if asID == "" { + return "", nil + } + matches := vmasIDRE.FindStringSubmatch(asID) if len(matches) != 2 { return "", fmt.Errorf("getAvailabilitySetNameByID: failed to parse the VMAS ID %s", asID) @@ -1081,7 +1108,8 @@ func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID stri vm, err := as.getVirtualMachine(types.NodeName(vmName), azcache.CacheReadTypeDefault) if err != nil { - return "", "", fmt.Errorf("cannot get the virtual machine by node name %s", vmName) + klog.Errorf("Unable to get the virtual machine by node name %s: %v", vmName, err) + return "", "", err } asID := "" if vm.VirtualMachineProperties != nil && vm.AvailabilitySet != nil { @@ -1093,7 +1121,7 @@ func (as *availabilitySet) 
GetNodeNameByIPConfigurationID(ipConfigurationID stri asName, err := getAvailabilitySetNameByID(asID) if err != nil { - return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s", asID) + return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s: %v", asID, err) } return vmName, strings.ToLower(asName), nil } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go index c5d1b203cf47..4c8baac728af 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_utils.go @@ -138,3 +138,28 @@ func parseTags(tags string) map[string]*string { } return formatted } + +func findKeyInMapCaseInsensitive(targetMap map[string]*string, key string) (bool, string) { + for k := range targetMap { + if strings.EqualFold(k, key) { + return true, k + } + } + + return false, "" +} + +func reconcileTags(currentTagsOnResource, newTags map[string]*string) (reconciledTags map[string]*string, changed bool) { + for k, v := range newTags { + found, key := findKeyInMapCaseInsensitive(currentTagsOnResource, k) + if !found { + currentTagsOnResource[k] = v + changed = true + } else if !strings.EqualFold(to.String(v), to.String(currentTagsOnResource[key])) { + currentTagsOnResource[key] = v + changed = true + } + } + + return currentTagsOnResource, changed +} diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go index a49230e4e116..d76dd2043784 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go @@ -71,11 +71,14 @@ type VMSet interface { // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. 
DetachDisk(diskName, diskURI string, nodeName types.NodeName) error // GetDataDisks gets a list of data disks attached to the node. - GetDataDisks(nodeName types.NodeName, string azcache.AzureCacheReadType) ([]compute.DataDisk, error) + GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error) // GetPowerStatusByNodeName returns the power state of the specified node. GetPowerStatusByNodeName(name string) (string, error) + // GetProvisioningStateByNodeName returns the provisioningState for the specified node. + GetProvisioningStateByNodeName(name string) (string, error) + // GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4) GetPrivateIPsByNodeName(name string) ([]string, error) diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 8504aada304b..2e7a48a9f396 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -74,7 +74,8 @@ type scaleSet struct { *Cloud // availabilitySet is also required for scaleSet because some instances - // (e.g. master nodes) may not belong to any scale sets. + // (e.g. control plane nodes) may not belong to any scale sets. + // this also allows for clusters with both VM and VMSS nodes. availabilitySet VMSet vmssCache *azcache.TimedCache @@ -194,6 +195,7 @@ func (ss *scaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.Azur } if !found || vm == nil { + klog.Warningf("Unable to find node %s: %v", node.nodeName, cloudprovider.InstanceNotFound) return "", "", nil, cloudprovider.InstanceNotFound } return vmssName, instanceID, vm, nil @@ -242,6 +244,30 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er return vmPowerStateStopped, nil } +// GetProvisioningStateByNodeName returns the provisioningState for the specified node. 
+func (ss *scaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) { + managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe) + if err != nil { + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + return "", err + } + if managedByAS { + // vm is managed by availability set. + return ss.availabilitySet.GetProvisioningStateByNodeName(name) + } + + _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) + if err != nil { + return provisioningState, err + } + + if vm.VirtualMachineScaleSetVMProperties == nil || vm.VirtualMachineScaleSetVMProperties.ProvisioningState == nil { + return provisioningState, nil + } + + return to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), nil +} + // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) { @@ -317,6 +343,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) if err != nil { + klog.Errorf("Unable to find node %s: %v", name, err) return "", err } @@ -364,6 +391,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe) if err != nil { + klog.Errorf("Unable to find node by providerID %s: %v", providerID, err) return "", err } @@ -684,6 +712,7 @@ func (ss *scaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.Azure return nil, err } if node.vmssName == "" { + klog.Warningf("Unable to find node %s: %v", nodeName, cloudprovider.InstanceNotFound) return nil, cloudprovider.InstanceNotFound } return node, nil @@ -696,7 +725,7 @@ func (ss *scaleSet) 
listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView)) if rerr != nil { - klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", rerr) + klog.Errorf("VirtualMachineScaleSetVMsClient.List(%s, %s) failed: %v", resourceGroup, scaleSetName, rerr) if rerr.IsNotFound() { return nil, cloudprovider.InstanceNotFound } @@ -935,6 +964,11 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam vmName := mapNodeNameToVMName(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { + if errors.Is(err, cloudprovider.InstanceNotFound) { + klog.Infof("EnsureHostInPool: skipping node %s because it is not found", vmName) + return "", "", "", nil, nil + } + return "", "", "", nil, err } @@ -1033,7 +1067,6 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam }) primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools newVM := &compute.VirtualMachineScaleSetVM{ - Sku: vm.Sku, Location: vm.Location, VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ HardwareProfile: vm.HardwareProfile, @@ -1175,7 +1208,6 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back }) primaryIPConfig.LoadBalancerBackendAddressPools = &loadBalancerBackendAddressPools newVMSS := compute.VirtualMachineScaleSet{ - Sku: vmss.Sku, Location: vmss.Location, VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ @@ -1312,6 +1344,11 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac func (ss *scaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { ssName, instanceID, vm, err := ss.getVmssVM(nodeName, 
azcache.CacheReadTypeDefault) if err != nil { + if errors.Is(err, cloudprovider.InstanceNotFound) { + klog.Infof("ensureBackendPoolDeletedFromNode: skipping node %s because it is not found", nodeName) + return "", "", "", nil, nil + } + return "", "", "", nil, err } @@ -1356,7 +1393,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str // Compose a new vmssVM with added backendPoolID. primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools newVM := &compute.VirtualMachineScaleSetVM{ - Sku: vm.Sku, Location: vm.Location, VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ HardwareProfile: vm.HardwareProfile, @@ -1379,8 +1415,17 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str func (ss *scaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) { matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID) if len(matches) != 4 { + if ss.DisableAvailabilitySetNodes { + return "", "", ErrorNotVmssInstance + } + klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID) - return "", "", ErrorNotVmssInstance + name, rg, err := ss.availabilitySet.GetNodeNameByIPConfigurationID(ipConfigurationID) + if err != nil && !errors.Is(err, cloudprovider.InstanceNotFound) { + klog.Errorf("GetNodeNameByIPConfigurationID: failed to invoke availabilitySet.GetNodeNameByIPConfigurationID: %s", err.Error()) + return "", "", err + } + return name, rg, nil } resourceGroup := matches[1] @@ -1388,6 +1433,7 @@ func (ss *scaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (st instanceID := matches[3] vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe) if err != nil { + klog.Errorf("Unable to find node by ipConfigurationID %s: %v", ipConfigurationID, err) return "", "", err } @@ -1431,10 
+1477,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen vmssNamesMap[vmSetName] = true } + vmssUpdaters := make([]func() error, 0, len(vmssNamesMap)) + errors := make([]error, 0, len(vmssNamesMap)) for vmssName := range vmssNamesMap { + vmssName := vmssName vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault) if err != nil { - return err + klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get VMSS %s: %v", vmssName, err) + errors = append(errors, err) + continue } // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. @@ -1450,11 +1501,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName) if err != nil { - return err + klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err) + errors = append(errors, err) + continue } primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC) if err != nil { - return err + klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary IP config from the VMSS %s's network config: %v", vmssName, err) + errors = append(errors, err) + continue } loadBalancerBackendAddressPools := []compute.SubResource{} if primaryIPConfig.LoadBalancerBackendAddressPools != nil { @@ -1475,26 +1530,38 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen continue } - // Compose a new vmss with added backendPoolID. 
- primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools - newVMSS := compute.VirtualMachineScaleSet{ - Sku: vmss.Sku, - Location: vmss.Location, - VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ - VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ - NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{ - NetworkInterfaceConfigurations: &vmssNIC, + vmssUpdaters = append(vmssUpdaters, func() error { + // Compose a new vmss with added backendPoolID. + primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools + newVMSS := compute.VirtualMachineScaleSet{ + Location: vmss.Location, + VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{ + VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{ + NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{ + NetworkInterfaceConfigurations: &vmssNIC, + }, }, }, - }, - } + } - klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID) - rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) - if rerr != nil { - klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) - return rerr.Error() - } + klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID) + rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) + if rerr != nil { + klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr) + return rerr.Error() + } + + return nil + }) + } + + errs := utilerrors.AggregateGoroutines(vmssUpdaters...) + if errs != nil { + return utilerrors.Flatten(errs) + } + // Fail if there are other errors. 
+ if len(errors) > 0 { + return utilerrors.Flatten(utilerrors.NewAggregate(errors)) } return nil @@ -1528,7 +1595,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, hostUpdates := make([]func() error, 0, len(ipConfigurationIDs)) nodeUpdates := make(map[vmssMetaInfo]map[string]compute.VirtualMachineScaleSetVM) - errors := make([]error, 0) + allErrs := make([]error, 0) for i := range ipConfigurationIDs { ipConfigurationID := ipConfigurationIDs[i] @@ -1548,14 +1615,16 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, } klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err) - errors = append(errors, err) + allErrs = append(allErrs, err) continue } nodeResourceGroup, nodeVMSS, nodeInstanceID, nodeVMSSVM, err := ss.ensureBackendPoolDeletedFromNode(nodeName, backendPoolID) if err != nil { - klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err) - errors = append(errors, err) + if !errors.Is(err, ErrorNotVmssInstance) { // Do nothing for the VMAS nodes. + klog.Errorf("EnsureBackendPoolDeleted(%s): backendPoolID(%s) - failed with error %v", getServiceName(service), backendPoolID, err) + allErrs = append(allErrs, err) + } continue } @@ -1601,8 +1670,8 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, } // Fail if there are other errors. - if len(errors) > 0 { - return utilerrors.Flatten(utilerrors.NewAggregate(errors)) + if len(allErrs) > 0 { + return utilerrors.Flatten(utilerrors.NewAggregate(allErrs)) } // Ensure the backendPoolID is also deleted on VMSS itself. 
diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index 5abfec1936e9..3aa5319f99e9 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -58,6 +58,11 @@ type vmssEntry struct { lastUpdate time.Time } +type availabilitySetEntry struct { + vmNames sets.String + nodeNames sets.String +} + func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) { getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [vmssName]*vmssEntry @@ -193,6 +198,11 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cac } computerName := strings.ToLower(*vm.OsProfile.ComputerName) + if vm.NetworkProfile == nil || vm.NetworkProfile.NetworkInterfaces == nil { + klog.Warningf("skip caching vmssVM %s since its network profile hasn't initialized yet (probably still under creating)", computerName) + continue + } + vmssVMCacheEntry := &vmssVirtualMachinesEntry{ resourceGroup: resourceGroupName, vmssName: vmssName, @@ -273,7 +283,7 @@ func (ss *scaleSet) deleteCacheForNode(nodeName string) error { func (ss *scaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) { getter := func(key string) (interface{}, error) { - localCache := sets.NewString() + vmNames := sets.NewString() resourceGroups, err := ss.GetResourceGroups() if err != nil { return nil, err @@ -287,11 +297,22 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) for _, vm := range vmList { if vm.Name != nil { - localCache.Insert(*vm.Name) + vmNames.Insert(*vm.Name) } } } + // store all the node names in the cluster when the cache data was created. 
+ nodeNames, err := ss.GetNodeNames() + if err != nil { + return nil, err + } + + localCache := availabilitySetEntry{ + vmNames: vmNames, + nodeNames: nodeNames, + } + return localCache, nil } @@ -313,6 +334,16 @@ func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt azcache. return false, err } - availabilitySetNodes := cached.(sets.String) - return availabilitySetNodes.Has(nodeName), nil + cachedNodes := cached.(availabilitySetEntry).nodeNames + // if the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache. + if !cachedNodes.Has(nodeName) { + klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh, refreshing the cache", nodeName) + cached, err = ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, azcache.CacheReadTypeForceRefresh) + if err != nil { + return false, err + } + } + + cachedVMs := cached.(availabilitySetEntry).vmNames + return cachedVMs.Has(nodeName), nil } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go b/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go index 26ae4ea6b220..49f2d0fdea4e 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go @@ -76,7 +76,7 @@ func (err *Error) Error() error { retryAfterSeconds = int(err.RetryAfter.Sub(curTime) / time.Second) } - return fmt.Errorf("Retriable: %v, RetryAfter: %ds, HTTPStatusCode: %d, RawError: %v", + return fmt.Errorf("Retriable: %v, RetryAfter: %ds, HTTPStatusCode: %d, RawError: %w", err.Retriable, retryAfterSeconds, err.HTTPStatusCode, err.RawError) } diff --git a/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go index 603ecde9b751..8db2e163cc11 100644 --- a/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go +++ 
b/vendor/k8s.io/legacy-cloud-providers/vsphere/shared_datastore.go @@ -153,7 +153,7 @@ func (shared *sharedDatastore) getNodeHosts(ctx context.Context, nodes []NodeInf var vmoList []mo.VirtualMachine err := pc.Retrieve(ctx, vmRefs, []string{nameProperty, runtimeHost}, &vmoList) if err != nil { - klog.Errorf("SharedHost.getNodeHosts: unable to fetch vms from datacenter %s: %w", nodeInfo.dataCenter.String(), err) + klog.Errorf("SharedHost.getNodeHosts: unable to fetch vms from datacenter %s: %v", nodeInfo.dataCenter.String(), err) return nil, err } var hostMoList []mo.HostSystem @@ -169,7 +169,7 @@ func (shared *sharedDatastore) getNodeHosts(ctx context.Context, nodes []NodeInf pc = property.DefaultCollector(nodeInfo.dataCenter.Client()) err = pc.Retrieve(ctx, hostRefs, []string{summary}, &hostMoList) if err != nil { - klog.Errorf("SharedHost.getNodeHosts: unable to fetch hosts from datacenter %s: %w", nodeInfo.dataCenter.String(), err) + klog.Errorf("SharedHost.getNodeHosts: unable to fetch hosts from datacenter %s: %v", nodeInfo.dataCenter.String(), err) return nil, err } var hosts []hostInfo diff --git a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go index 350029a04c5e..fb9db9eb7822 100644 --- a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go +++ b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go @@ -936,11 +936,10 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN return "", err } - // try and get canonical path for disk and if we can't throw error - vmDiskPath, err = getcanonicalVolumePath(ctx, vm.Datacenter, vmDiskPath) - if err != nil { - klog.Errorf("failed to get canonical path for %s on node %s: %v", vmDiskPath, convertToString(nodeName), err) - return "", err + // try and get canonical path for disk and if we can't use provided vmDiskPath + canonicalPath, pathFetchErr := getcanonicalVolumePath(ctx, vm.Datacenter, vmDiskPath) + if canonicalPath 
!= "" && pathFetchErr == nil { + vmDiskPath = canonicalPath } diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName}) diff --git a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere_util.go b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere_util.go index 0b778a6a7cc0..68fb9c5c7364 100644 --- a/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere_util.go +++ b/vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere_util.go @@ -639,7 +639,7 @@ func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) { // Start go routines per VC-DC to check disks are attached wg.Add(1) go func(nodes []k8stypes.NodeName) { - err := vs.checkNodeDisks(ctx, nodeNames) + err := vs.checkNodeDisks(ctx, nodes) if err != nil { klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) } diff --git a/vendor/k8s.io/mount-utils/fake_mounter.go b/vendor/k8s.io/mount-utils/fake_mounter.go index 393ed043ba04..55ea5e2986b2 100644 --- a/vendor/k8s.io/mount-utils/fake_mounter.go +++ b/vendor/k8s.io/mount-utils/fake_mounter.go @@ -136,6 +136,10 @@ func (f *FakeMounter) MountSensitiveWithoutSystemd(source string, target string, return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) } +func (f *FakeMounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) +} + // Unmount records the unmount event and updates the in-memory mount points for FakeMounter func (f *FakeMounter) Unmount(target string) error { f.mutex.Lock() diff --git a/vendor/k8s.io/mount-utils/go.mod b/vendor/k8s.io/mount-utils/go.mod index 2ef6cd2d155d..659d5ac5f6ad 100644 --- a/vendor/k8s.io/mount-utils/go.mod +++ b/vendor/k8s.io/mount-utils/go.mod @@ -9,7 +9,7 @@ require ( 
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/stretchr/testify v1.6.1 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - k8s.io/klog/v2 v2.8.0 + k8s.io/klog/v2 v2.9.0 k8s.io/utils v0.0.0-20210521133846-da695404a2bc ) diff --git a/vendor/k8s.io/mount-utils/go.sum b/vendor/k8s.io/mount-utils/go.sum index 863d0aec99a8..902424d7934e 100644 --- a/vendor/k8s.io/mount-utils/go.sum +++ b/vendor/k8s.io/mount-utils/go.sum @@ -25,7 +25,7 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= k8s.io/utils v0.0.0-20210521133846-da695404a2bc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/k8s.io/mount-utils/mount.go b/vendor/k8s.io/mount-utils/mount.go index 93b60d3f9220..a882fcc73991 100644 --- a/vendor/k8s.io/mount-utils/mount.go +++ b/vendor/k8s.io/mount-utils/mount.go @@ -49,6 +49,8 @@ type Interface interface { MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error // MountSensitiveWithoutSystemd is the same as MountSensitive() but this method disable using systemd mount. 
MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error + // MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd() with additional mount flags + MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error // Unmount unmounts given target. Unmount(target string) error // List returns a list of all mounted filesystems. This can be large. diff --git a/vendor/k8s.io/mount-utils/mount_linux.go b/vendor/k8s.io/mount-utils/mount_linux.go index 10a1c3f01068..7097eae0876e 100644 --- a/vendor/k8s.io/mount-utils/mount_linux.go +++ b/vendor/k8s.io/mount-utils/mount_linux.go @@ -87,11 +87,11 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, true) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -103,19 +103,24 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, 
options, sensitiveOptions, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, nil /* mountFlags */, true) } // MountSensitiveWithoutSystemd is the same as MountSensitive() but disable using systemd mount. func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error { + return mounter.MountSensitiveWithoutSystemdWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags. +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, false) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, mountFlags, false) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, false) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -127,14 +132,14 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, 
false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, mountFlags, false) } // doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) // systemdMountRequired is an extension of option to decide whether uses systemd mount. -func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, systemdMountRequired bool) error { - mountArgs, mountArgsLogStr := MakeMountArgsSensitive(source, target, fstype, options, sensitiveOptions) +func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string, systemdMountRequired bool) error { + mountArgs, mountArgsLogStr := MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, mountFlags) if len(mounterPath) > 0 { mountArgs = append([]string{mountCmd}, mountArgs...) mountArgsLogStr = mountCmd + " " + mountArgsLogStr @@ -217,10 +222,22 @@ func MakeMountArgs(source, target, fstype string, options []string) (mountArgs [ // MakeMountArgsSensitive makes the arguments to the mount(8) command. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) func MakeMountArgsSensitive(source, target, fstype string, options []string, sensitiveOptions []string) (mountArgs []string, mountArgsLogStr string) { + return MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MakeMountArgsSensitiveWithMountFlags makes the arguments to the mount(8) command. 
+// sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) +// mountFlags are additional mount flags that are not related with the fstype +// and mount options +func MakeMountArgsSensitiveWithMountFlags(source, target, fstype string, options []string, sensitiveOptions []string, mountFlags []string) (mountArgs []string, mountArgsLogStr string) { // Build mount command as follows: - // mount [-t $fstype] [-o $options] [$source] $target + // mount [$mountFlags] [-t $fstype] [-o $options] [$source] $target mountArgs = []string{} mountArgsLogStr = "" + + mountArgs = append(mountArgs, mountFlags...) + mountArgsLogStr += strings.Join(mountFlags, " ") + if len(fstype) > 0 { mountArgs = append(mountArgs, "-t", fstype) mountArgsLogStr += strings.Join(mountArgs, " ") diff --git a/vendor/k8s.io/mount-utils/mount_unsupported.go b/vendor/k8s.io/mount-utils/mount_unsupported.go index 0e8e683ae3a3..d2aac9a74831 100644 --- a/vendor/k8s.io/mount-utils/mount_unsupported.go +++ b/vendor/k8s.io/mount-utils/mount_unsupported.go @@ -53,6 +53,11 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin return errUnsupported } +// MountSensitiveWithoutSystemdWithMountFlags always returns an error on unsupported platforms +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return errUnsupported +} + // Unmount always returns an error on unsupported platforms func (mounter *Mounter) Unmount(target string) error { return errUnsupported diff --git a/vendor/k8s.io/mount-utils/mount_windows.go b/vendor/k8s.io/mount-utils/mount_windows.go index 29d3bbbd376e..a893f52131c0 100644 --- a/vendor/k8s.io/mount-utils/mount_windows.go +++ b/vendor/k8s.io/mount-utils/mount_windows.go @@ -64,6 +64,12 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin 
return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) } +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags +// Windows not supported systemd mount, this function degrades to MountSensitive(). +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) +} + // MountSensitive is the same as Mount() but this method allows // sensitiveOptions to be passed in a separate parameter from the normal // mount options and ensures the sensitiveOptions are never logged. This diff --git a/vendor/modules.txt b/vendor/modules.txt index 680ea3451c86..7e8be1a8e17a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -135,6 +135,8 @@ github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile +# github.com/bits-and-blooms/bitset v1.2.0 +github.com/bits-and-blooms/bitset # github.com/blang/semver v3.5.1+incompatible ## explicit github.com/blang/semver @@ -315,7 +317,7 @@ github.com/gogo/protobuf/types github.com/golang/groupcache/lru # github.com/golang/mock v1.4.4 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.4.3 +# github.com/golang/protobuf v1.5.0 ## explicit github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -374,7 +376,7 @@ github.com/google/cadvisor/utils/sysinfo github.com/google/cadvisor/version github.com/google/cadvisor/watcher github.com/google/cadvisor/zfs -# github.com/google/go-cmp v0.5.4 +# github.com/google/go-cmp v0.5.5 ## explicit github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts @@ -608,7 +610,7 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec 
v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.0.0-rc95.0.20210608002938-1f5126fe967e => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e +# github.com/opencontainers/runc v1.0.2 => github.com/kolyshkin/runc v1.0.0-rc95.0.20211216181318-624f590e289a github.com/opencontainers/runc/libcontainer github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/capabilities @@ -636,11 +638,11 @@ github.com/opencontainers/runc/libcontainer/utils github.com/opencontainers/runc/types # github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 github.com/opencontainers/runtime-spec/specs-go -# github.com/opencontainers/selinux v1.8.0 +# github.com/opencontainers/selinux v1.8.2 github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk -# github.com/openshift/api v0.0.0-20210521075222-e273a339932a +# github.com/openshift/api v0.0.0-20210910062324-a41d3573a3ba ## explicit github.com/openshift/api github.com/openshift/api/annotations @@ -703,7 +705,7 @@ github.com/openshift/api/template/v1 github.com/openshift/api/unidling/v1alpha1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/apiserver-library-go v0.0.0-20210521113822-91c23a9a7ddf +# github.com/openshift/apiserver-library-go v0.0.0-20211116020226-339bb71f9a26 ## explicit github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1 @@ -804,7 +806,7 @@ github.com/openshift/client-go/user/informers/externalversions/internalinterface github.com/openshift/client-go/user/informers/externalversions/user github.com/openshift/client-go/user/informers/externalversions/user/v1 github.com/openshift/client-go/user/listers/user/v1 -# 
github.com/openshift/library-go v0.0.0-20210521084623-7392ea9b02ca +# github.com/openshift/library-go v0.0.0-20211109160828-8c48fafbad15 ## explicit github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout @@ -902,7 +904,7 @@ github.com/satori/go.uuid github.com/seccomp/libseccomp-golang # github.com/sergi/go-diff v1.1.0 github.com/sergi/go-diff/diffmatchpatch -# github.com/sirupsen/logrus v1.7.0 +# github.com/sirupsen/logrus v1.8.1 github.com/sirupsen/logrus # github.com/soheilhy/cmux v0.1.4 github.com/soheilhy/cmux @@ -1184,7 +1186,7 @@ golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc # golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d golang.org/x/term -# golang.org/x/text v0.3.4 +# golang.org/x/text v0.3.6 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -1288,7 +1290,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.25.0 +# google.golang.org/protobuf v1.26.0 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -1301,18 +1303,18 @@ google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/fieldsort google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs 
google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface @@ -1405,7 +1407,7 @@ gopkg.in/warnings.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gopkg.in/yaml.v3 -# k8s.io/api v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210629175304-1622f8729964 +# k8s.io/api v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1453,7 +1455,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210629175304-1622f8729964 +# k8s.io/apiextensions-apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1497,7 +1499,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210629175304-1622f8729964 +# k8s.io/apimachinery v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1562,7 +1564,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json 
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210629175304-1622f8729964 +# k8s.io/apiserver v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1704,12 +1706,12 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210629175304-1622f8729964 +# k8s.io/cli-runtime v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210629175304-1622f8729964 +# k8s.io/client-go v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2001,7 +2003,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210629175304-1622f8729964 +# k8s.io/cloud-provider v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20211215000257-b4b48133acd2 k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/credentialconfig @@ -2010,12 +2012,12 @@ 
k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210629175304-1622f8729964 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20211215000257-b4b48133acd2 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210629175304-1622f8729964 +# k8s.io/component-base v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2038,7 +2040,7 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/term k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210629175304-1622f8729964 +# k8s.io/component-helpers v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apps/poddisruptionbudget @@ -2048,16 +2050,16 @@ k8s.io/component-helpers/node/topology k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210629175304-1622f8729964 +# k8s.io/cri-api v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20211215000257-b4b48133acd2 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# 
k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210629175304-1622f8729964 +# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20211215000257-b4b48133acd2 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins -# k8s.io/klog/v2 v2.8.0 +# k8s.io/klog/v2 v2.9.0 ## explicit k8s.io/klog/v2 -# k8s.io/kube-aggregator v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210629175304-1622f8729964 +# k8s.io/kube-aggregator v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20211215000257-b4b48133acd2 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -2101,11 +2103,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210629175304-1622f8729964 +# k8s.io/kube-proxy v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20211215000257-b4b48133acd2 k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210629175304-1622f8729964 +# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20211215000257-b4b48133acd2 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210629175304-1622f8729964 +# k8s.io/kubectl v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -2130,7 +2132,7 @@ 
k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210629175304-1622f8729964 +# k8s.io/kubelet v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/kubelet/config/v1alpha1 k8s.io/kubelet/config/v1beta1 @@ -2143,7 +2145,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.21.1 => github.com/openshift/kubernetes v1.21.2-0.20210629175304-1622f8729964 +# k8s.io/kubernetes v1.21.1 => github.com/openshift/kubernetes v1.21.3-0.20211215000257-b4b48133acd2 ## explicit k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -2175,6 +2177,7 @@ k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/cu k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/dns k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image +k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/kubecontrollermanager k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/network k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/project @@ -2899,7 +2902,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse -# k8s.io/legacy-cloud-providers v0.21.1 => 
github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210629175304-1622f8729964 +# k8s.io/legacy-cloud-providers v0.21.1 => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20211215000257-b4b48133acd2 ## explicit k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure @@ -2943,7 +2946,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210629175304-1622f8729964 +# k8s.io/metrics v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20211215000257-b4b48133acd2 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 @@ -2952,9 +2955,9 @@ k8s.io/metrics/pkg/apis/external_metrics/v1beta1 k8s.io/metrics/pkg/client/custom_metrics k8s.io/metrics/pkg/client/custom_metrics/scheme k8s.io/metrics/pkg/client/external_metrics -# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210629175304-1622f8729964 +# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20211215000257-b4b48133acd2 k8s.io/mount-utils -# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210629175304-1622f8729964 +# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20211215000257-b4b48133acd2 k8s.io/sample-apiserver/pkg/apis/wardle k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 # k8s.io/utils v0.0.0-20210521133846-da695404a2bc @@ -2974,9 +2977,14 @@ k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/strings k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 +# 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client +# sigs.k8s.io/kube-storage-version-migrator v0.0.4 +sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1 +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1 # sigs.k8s.io/kustomize/api v0.8.8 sigs.k8s.io/kustomize/api/builtins sigs.k8s.io/kustomize/api/filesys @@ -3051,7 +3059,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.1.0 +# sigs.k8s.io/structured-merge-diff/v4 v4.1.2 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema @@ -3061,31 +3069,31 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit sigs.k8s.io/yaml # github.com/onsi/ginkgo => github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible -# github.com/opencontainers/runc => github.com/openshift/opencontainers-runc v1.0.0-rc95.0.20210608002938-1f5126fe967e -# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20210629175304-1622f8729964 -# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20210629175304-1622f8729964 -# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20210629175304-1622f8729964 -# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20210629175304-1622f8729964 -# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20210629175304-1622f8729964 -# 
k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20210629175304-1622f8729964 -# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20210629175304-1622f8729964 -# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20210629175304-1622f8729964 -# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20210629175304-1622f8729964 -# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20210629175304-1622f8729964 -# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20210629175304-1622f8729964 -# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20210629175304-1622f8729964 -# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210629175304-1622f8729964 -# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20210629175304-1622f8729964 -# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20210629175304-1622f8729964 -# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20210629175304-1622f8729964 -# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20210629175304-1622f8729964 -# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20210629175304-1622f8729964 -# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20210629175304-1622f8729964 -# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20210629175304-1622f8729964 -# k8s.io/kubernetes => github.com/openshift/kubernetes 
v1.21.2-0.20210629175304-1622f8729964 -# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20210629175304-1622f8729964 -# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20210629175304-1622f8729964 -# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20210629175304-1622f8729964 -# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20210629175304-1622f8729964 -# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20210629175304-1622f8729964 -# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20210629175304-1622f8729964 +# github.com/opencontainers/runc => github.com/kolyshkin/runc v1.0.0-rc95.0.20211216181318-624f590e289a +# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/code-generator => 
github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/kubernetes => github.com/openshift/kubernetes v1.21.3-0.20211215000257-b4b48133acd2 +# k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20211215000257-b4b48133acd2 +# 
k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20211215000257-b4b48133acd2 +# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20211215000257-b4b48133acd2 diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go index 1d9a4950024e..761ac71fa87f 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go @@ -19,6 +19,7 @@ package client import ( "context" "errors" + "fmt" "io" "math/rand" "net" @@ -34,7 +35,7 @@ import ( type Tunnel interface { // Dial connects to the address on the named network, similar to // what net.Dial does. The only supported protocol is tcp. - Dial(protocol, address string) (net.Conn, error) + DialContext(ctx context.Context, protocol, address string) (net.Conn, error) } type dialResult struct { @@ -49,6 +50,10 @@ type grpcTunnel struct { conns map[int64]*conn pendingDialLock sync.RWMutex connsLock sync.RWMutex + + // The tunnel will be closed if the caller fails to read via conn.Read() + // more than readTimeoutSeconds after a packet has been received. + readTimeoutSeconds int } type clientConn interface { @@ -61,23 +66,24 @@ var _ clientConn = &grpc.ClientConn{} // gRPC based proxy service. // Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated // The Dial() method of the returned tunnel should only be called once -func CreateSingleUseGrpcTunnel(address string, opts ...grpc.DialOption) (Tunnel, error) { - c, err := grpc.Dial(address, opts...) 
+func CreateSingleUseGrpcTunnel(ctx context.Context, address string, opts ...grpc.DialOption) (Tunnel, error) { + c, err := grpc.DialContext(ctx, address, opts...) if err != nil { return nil, err } grpcClient := client.NewProxyServiceClient(c) - stream, err := grpcClient.Proxy(context.Background()) + stream, err := grpcClient.Proxy(ctx) if err != nil { return nil, err } tunnel := &grpcTunnel{ - stream: stream, - pendingDial: make(map[int64]chan<- dialResult), - conns: make(map[int64]*conn), + stream: stream, + pendingDial: make(map[int64]chan<- dialResult), + conns: make(map[int64]*conn), + readTimeoutSeconds: 10, } go tunnel.serve(c) @@ -110,10 +116,17 @@ func (t *grpcTunnel) serve(c clientConn) { if !ok { klog.V(1).Infoln("DialResp not recognized; dropped") } else { - ch <- dialResult{ + result := dialResult{ err: resp.Error, connid: resp.ConnectID, } + select { + case ch <- result: + default: + klog.ErrorS(fmt.Errorf("blocked pending channel"), "Received second dial response for connection request", "connectionID", resp.ConnectID, "dialID", resp.Random) + // On multiple dial responses, avoid leaking serve goroutine. + return + } } if resp.Error != "" { @@ -129,7 +142,14 @@ func (t *grpcTunnel) serve(c clientConn) { t.connsLock.RUnlock() if ok { - conn.readCh <- resp.Data + timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second) + select { + case conn.readCh <- resp.Data: + timer.Stop() + case <-timer.C: + klog.ErrorS(fmt.Errorf("timeout"), "readTimeout has been reached, the grpc connection to the proxy server will be closed", "connectionID", conn.connID, "readTimeoutSeconds", t.readTimeoutSeconds) + return + } } else { klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID) } @@ -155,13 +175,13 @@ func (t *grpcTunnel) serve(c clientConn) { // Dial connects to the address on the named network, similar to // what net.Dial does. The only supported protocol is tcp. 
-func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) { +func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string) (net.Conn, error) { if protocol != "tcp" { return nil, errors.New("protocol not supported") } - random := rand.Int63() - resCh := make(chan dialResult) + random := rand.Int63() /* #nosec G404 */ + resCh := make(chan dialResult, 1) t.pendingDialLock.Lock() t.pendingDial[random] = resCh t.pendingDialLock.Unlock() @@ -199,12 +219,14 @@ func (t *grpcTunnel) Dial(protocol, address string) (net.Conn, error) { } c.connID = res.connid c.readCh = make(chan []byte, 10) - c.closeCh = make(chan string) + c.closeCh = make(chan string, 1) t.connsLock.Lock() t.conns[res.connid] = c t.connsLock.Unlock() case <-time.After(30 * time.Second): - return nil, errors.New("dial timeout") + return nil, errors.New("dial timeout, backstop") + case <-ctx.Done(): + return nil, errors.New("dial timeout, context") } return c, nil diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go new file mode 100644 index 000000000000..da6d19a24bb2 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +// +groupName=migration.k8s.io +package v1alpha1 diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go new file mode 100644 index 000000000000..f400f747ebbd --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "migration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageVersionMigration{}, + &StorageVersionMigrationList{}, + &StorageState{}, + &StorageStateList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go new file mode 100644 index 000000000000..dde42a5b3749 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go @@ -0,0 +1,187 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +type StorageVersionMigration struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the migration. 
+ // +optional + Spec StorageVersionMigrationSpec `json:"spec,omitempty"` + // Status of the migration. + // +optional + Status StorageVersionMigrationStatus `json:"status,omitempty"` +} + +// The names of the group, the version, and the resource. +type GroupVersionResource struct { + // The name of the group. + Group string `json:"group,omitempty"` + // The name of the version. + Version string `json:"version,omitempty"` + // The name of the resource. + Resource string `json:"resource,omitempty"` +} + +// Spec of the storage version migration. +type StorageVersionMigrationSpec struct { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + Resource GroupVersionResource `json:"resource"` + // The token used in the list options to get the next chunk of objects + // to migrate. When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + ContinueToken string `json:"continueToken,omitempty"` + // TODO: consider recording the storage version hash when the migration + // is created. It can avoid races. +} + +type MigrationConditionType string + +const ( + // Indicates that the migration is running. + MigrationRunning MigrationConditionType = "Running" + // Indicates that the migration has completed successfully. + MigrationSucceeded MigrationConditionType = "Succeeded" + // Indicates that the migration has failed. + MigrationFailed MigrationConditionType = "Failed" +) + +// Describes the state of a migration at a certain point. +type MigrationCondition struct { + // Type of the condition. + Type MigrationConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // The last time this condition was updated. 
+ // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` +} + +// Status of the storage version migration. +type StorageVersionMigrationStatus struct { + // The latest available observations of the migration's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []MigrationCondition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageVersionMigrationList is a collection of storage version migrations. +type StorageVersionMigrationList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of StorageVersionMigration + Items []StorageVersionMigration `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// The state of the storage of a specific resource. +type StorageState struct { + metav1.TypeMeta `json:",inline"` + // The name must be "<.spec.resource.resouce>.<.spec.resource.group>". + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the storage state. + // +optional + Spec StorageStateSpec `json:"spec,omitempty"` + // Status of the storage state. + // +optional + Status StorageStateStatus `json:"status,omitempty"` +} + +// The names of the group and the resource. +type GroupResource struct { + // The name of the group. + Group string `json:"group,omitempty"` + // The name of the resource. + Resource string `json:"resource,omitempty"` +} + +// Specification of the storage state. +type StorageStateSpec struct { + // The resource this storageState is about. 
+ Resource GroupResource `json:"resource,omitempty"` +} + +// Unknown is a valid value in persistedStorageVersionHashes. +const Unknown = "Unknown" + +// Status of the storage state. +type StorageStateStatus struct { + // The hash values of storage versions that persisted instances of + // spec.resource might still be encoded in. + // "Unknown" is a valid value in the list, and is the default value. + // It is not safe to upgrade or downgrade to an apiserver binary that does not + // support all versions listed in this field, or if "Unknown" is listed. + // Once the storage version migration for this resource has completed, the + // value of this field is refined to only contain the + // currentStorageVersionHash. + // Once the apiserver has changed the storage version, the new storage version + // is appended to the list. + // +optional + PersistedStorageVersionHashes []string `json:"persistedStorageVersionHashes,omitempty"` + // The hash value of the current storage version, as shown in the discovery + // document served by the API server. + // Storage Version is the version to which objects are converted to + // before persisted. + // +optional + CurrentStorageVersionHash string `json:"currentStorageVersionHash,omitempty"` + // LastHeartbeatTime is the last time the storage migration triggering + // controller checks the storage version hash of this resource in the + // discovery document and updates this field. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageStateList is a collection of storage state. 
+type StorageStateList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of StorageState + Items []StorageState `json:"items"` +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..5d6374d6095c --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,275 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition. +func (in *MigrationCondition) DeepCopy() *MigrationCondition { + if in == nil { + return nil + } + out := new(MigrationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageState) DeepCopyInto(out *StorageState) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageState. +func (in *StorageState) DeepCopy() *StorageState { + if in == nil { + return nil + } + out := new(StorageState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageState) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageStateList) DeepCopyInto(out *StorageStateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateList. +func (in *StorageStateList) DeepCopy() *StorageStateList { + if in == nil { + return nil + } + out := new(StorageStateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageStateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStateSpec) DeepCopyInto(out *StorageStateSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateSpec. +func (in *StorageStateSpec) DeepCopy() *StorageStateSpec { + if in == nil { + return nil + } + out := new(StorageStateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStateStatus) DeepCopyInto(out *StorageStateStatus) { + *out = *in + if in.PersistedStorageVersionHashes != nil { + in, out := &in.PersistedStorageVersionHashes, &out.PersistedStorageVersionHashes + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateStatus. 
+func (in *StorageStateStatus) DeepCopy() *StorageStateStatus { + if in == nil { + return nil + } + out := new(StorageStateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigration. +func (in *StorageVersionMigration) DeepCopy() *StorageVersionMigration { + if in == nil { + return nil + } + out := new(StorageVersionMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationList) DeepCopyInto(out *StorageVersionMigrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersionMigration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationList. +func (in *StorageVersionMigrationList) DeepCopy() *StorageVersionMigrationList { + if in == nil { + return nil + } + out := new(StorageVersionMigrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *StorageVersionMigrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationSpec) DeepCopyInto(out *StorageVersionMigrationSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationSpec. +func (in *StorageVersionMigrationSpec) DeepCopy() *StorageVersionMigrationSpec { + if in == nil { + return nil + } + out := new(StorageVersionMigrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]MigrationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationStatus. +func (in *StorageVersionMigrationStatus) DeepCopy() *StorageVersionMigrationStatus { + if in == nil { + return nil + } + out := new(StorageVersionMigrationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go new file mode 100644 index 000000000000..d8f4f36dd159 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package clientset + +import ( + "fmt" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + migrationV1alpha1 *migrationv1alpha1.MigrationV1alpha1Client +} + +// MigrationV1alpha1 retrieves the MigrationV1alpha1Client +func (c *Clientset) MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface { + return c.migrationV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.migrationV1alpha1, err = migrationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.migrationV1alpha1 = migrationv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.migrationV1alpha1 = migrationv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go new file mode 100644 index 000000000000..ee865e56d1dd --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package clientset diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go new file mode 100644 index 000000000000..7dc3756168fa --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go new file mode 100644 index 000000000000..93f2b6e2e738 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + migrationv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go new file mode 100644 index 000000000000..df51baa4d4c1 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..3ce4f57536cd --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type StorageStateExpansion interface{} + +type StorageVersionMigrationExpansion interface{} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go new file mode 100644 index 000000000000..097a331f01e7 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +type MigrationV1alpha1Interface interface { + RESTClient() rest.Interface + StorageStatesGetter + StorageVersionMigrationsGetter +} + +// MigrationV1alpha1Client is used to interact with features provided by the migration.k8s.io group. +type MigrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MigrationV1alpha1Client) StorageStates() StorageStateInterface { + return newStorageStates(c) +} + +func (c *MigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface { + return newStorageVersionMigrations(c) +} + +// NewForConfig creates a new MigrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*MigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &MigrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MigrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MigrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MigrationV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MigrationV1alpha1Client { + return &MigrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MigrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go new file mode 100644 index 000000000000..8345b36199d6 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +// StorageStatesGetter has a method to return a StorageStateInterface. +// A group's client should implement this interface. +type StorageStatesGetter interface { + StorageStates() StorageStateInterface +} + +// StorageStateInterface has methods to work with StorageState resources. +type StorageStateInterface interface { + Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (*v1alpha1.StorageState, error) + Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error) + UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageState, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageStateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error) + StorageStateExpansion +} + +// storageStates implements StorageStateInterface +type storageStates struct { + client rest.Interface +} + +// newStorageStates returns a StorageStates +func newStorageStates(c *MigrationV1alpha1Client) *storageStates { + return &storageStates{ + client: c.RESTClient(), + } +} + +// Get takes name of the 
storageState, and returns the corresponding storageState object, and an error if there is any. +func (c *storageStates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Get(). + Resource("storagestates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageStates that match those selectors. +func (c *storageStates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageStateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageStateList{} + err = c.client.Get(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageStates. +func (c *storageStates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageState and creates it. Returns the server's representation of the storageState, and an error, if there is any. +func (c *storageStates) Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Post(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a storageState and updates it. Returns the server's representation of the storageState, and an error, if there is any. +func (c *storageStates) Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Put(). + Resource("storagestates"). + Name(storageState.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageStates) UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Put(). + Resource("storagestates"). + Name(storageState.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageState and deletes it. Returns an error if one occurs. +func (c *storageStates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storagestates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageStates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storagestates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageState. 
+func (c *storageStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Patch(pt). + Resource("storagestates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go new file mode 100644 index 000000000000..34fa3a987e36 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +// StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. +// A group's client should implement this interface. +type StorageVersionMigrationsGetter interface { + StorageVersionMigrations() StorageVersionMigrationInterface +} + +// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. +type StorageVersionMigrationInterface interface { + Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) + StorageVersionMigrationExpansion +} + +// storageVersionMigrations implements StorageVersionMigrationInterface +type 
storageVersionMigrations struct { + client rest.Interface +} + +// newStorageVersionMigrations returns a StorageVersionMigrations +func newStorageVersionMigrations(c *MigrationV1alpha1Client) *storageVersionMigrations { + return &storageVersionMigrations{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. +func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Get(). + Resource("storageversionmigrations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. +func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageVersionMigrationList{} + err = c.client.Get(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageVersionMigrations. +func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageVersionMigration and creates it. 
Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Post(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Put(). + Resource("storageversionmigrations"). + Name(storageVersionMigration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Put(). + Resource("storageversionmigrations"). + Name(storageVersionMigration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. 
+func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageversionmigrations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageversionmigrations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageVersionMigration. +func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Patch(pt). + Resource("storageversionmigrations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go index 5a8214ae2d4e..2b98b729cacc 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -124,13 +124,6 @@ func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fiel v.schema = tv.schema v.typeRef = tv.typeRef - // We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced - // types do not support atomic or granular tags. 
Nor does the dynamic schema deduction - // interact well with the reconcile logic. - if v.schema == DeducedParseableType.Schema { - return nil, nil - } - defer v.finished() errs := v.reconcile() @@ -187,19 +180,17 @@ func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fiel } func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) { - // reconcile lists changed from granular to atomic + // reconcile lists changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. + // + // In this case, the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. if !v.isAtomic && t.ElementRelationship == schema.Atomic { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic return errs } - // reconcile lists changed from atomic to granular - if v.isAtomic && t.ElementRelationship == schema.Associative { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitListItems(t, v.fieldSet) } @@ -231,7 +222,18 @@ func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldp } func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) { - // reconcile maps and structs changed from granular to atomic + // We don't currently reconcile deduced types (unstructured CRDs) or maps that contain only unknown + // fields since deduced types do not yet support atomic or granular tags. + if isUntypedDeducedMap(t) { + return errs + } + + // reconcile maps and structs changed from granular to atomic. + // Note that migrations from atomic to granular are not recommended and will + // be treated as if they were always granular. 
+ // + // In this case the manager that owned the previously atomic field (and all subfields), + // will now own just the top-level field and none of the subfields. if !v.isAtomic && t.ElementRelationship == schema.Atomic { if v.fieldSet != nil && v.fieldSet.Size() > 0 { v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields @@ -239,34 +241,12 @@ func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) } return errs } - // reconcile maps changed from atomic to granular - if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") { - v.toAdd, errs = buildGranularFieldSet(v.path, v.value) - if errs != nil { - return errs - } - } if v.fieldSet != nil { errs = v.visitMapItems(t, v.fieldSet) } return errs } -func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) { - - valueFieldSet, err := value.ToFieldSet() - if err != nil { - return nil, errorf("toFieldSet: %v", err) - } - if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok { - result := fieldpath.NewSet(path) - resultAtPath := descendToPath(result, path) - *resultAtPath = *valueFieldSetAtPath - return result, nil - } - return nil, nil -} - func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) { ok := true for _, pe := range path { @@ -293,3 +273,18 @@ func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, boo } return tr, tr != schema.TypeRef{} } + +// isUntypedDeducedMap returns true if m has no fields defined, but allows untyped elements. +// This is equivalent to a openAPI object that has x-kubernetes-preserve-unknown-fields=true +// but does not have any properties defined on the object. 
+func isUntypedDeducedMap(m *schema.Map) bool { + return isUntypedDeducedRef(m.ElementType) && m.Fields == nil +} + +func isUntypedDeducedRef(t schema.TypeRef) bool { + if t.NamedType != nil { + return *t.NamedType == "__untyped_deduced_" + } + atom := t.Inlined + return atom.Scalar != nil && *atom.Scalar == "untyped" +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index c3e15180a7b1..a338d761d43f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -51,10 +51,22 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { + if !w.value.IsList() { + return nil + } l := w.value.AsListUsing(w.allocator) defer w.allocator.Free(l) - // If list is null, empty, or atomic just return - if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic { + // If list is null or empty just return + if l == nil || l.Length() == 0 { + return nil + } + + // atomic lists should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -70,7 +82,7 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { // but ignore them when we are removing (i.e. 
!w.shouldExtract) if w.toRemove.Has(path) { if w.shouldExtract { - newItems = append(newItems, item.Unstructured()) + newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured()) } else { continue } @@ -92,12 +104,24 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { } func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { + if !w.value.IsMap() { + return nil + } m := w.value.AsMapUsing(w.allocator) if m != nil { defer w.allocator.Free(m) } - // If map is null, empty, or atomic just return - if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic { + // If map is null or empty just return + if m == nil || m.Empty() { + return nil + } + + // atomic maps should return everything in the case of extract + // and nothing in the case of remove (!w.shouldExtract) + if t.ElementRelationship == schema.Atomic { + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -118,7 +142,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { // but ignore them when we are removing (i.e. !w.shouldExtract) if w.toRemove.Has(path) { if w.shouldExtract { - newMap[k] = val.Unstructured() + newMap[k] = removeItemsWithSchema(val, w.toRemove, w.schema, fieldType, w.shouldExtract).Unstructured() + } return true }