From 1aba998ac7d3aea4dc2b34e36933eabfd83adf17 Mon Sep 17 00:00:00 2001
From: Mulham Raee
Date: Tue, 26 Nov 2024 17:07:47 +0100
Subject: [PATCH 1/2] bump github.com/openshift/hypershift/api

---
 go.mod | 80 +- go.sum | 172 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1991 ++++- .../aws/aws-sdk-go/aws/request/waiter.go | 13 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../protocol/query/queryutil/queryutil.go | 4 +- .../aws/aws-sdk-go/service/ec2/api.go | 2340 ++++++++-- .../aws/aws-sdk-go/service/ssooidc/api.go | 162 +- .../aws/aws-sdk-go/service/ssooidc/errors.go | 8 + .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +- .../fsnotify/fsnotify/backend_fen.go | 324 +- .../fsnotify/fsnotify/backend_inotify.go | 594 +-- .../fsnotify/fsnotify/backend_kqueue.go | 747 ++- .../fsnotify/fsnotify/backend_other.go | 204 +- .../fsnotify/fsnotify/backend_windows.go | 305 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - .../github.com/golang/protobuf/ptypes/any.go | 180 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 10 - .../golang/protobuf/ptypes/duration.go | 76 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 112 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - .../gnostic-models/compiler/extensions.go | 8 +- .../gnostic-models/extensions/extensions.go | 6 +- .../openapiv3/annotations.pb.go | 182 + .../openapiv3/annotations.proto | 56 + .../github.com/google/pprof/profile/encode.go | 5 + .../github.com/google/pprof/profile/merge.go | 5 + .../google/pprof/profile/profile.go | 7 +- .../gophercloud/gophercloud/v2/CHANGELOG.md | 13 + .../gophercloud/gophercloud/v2/README.md | 2 - .../gophercloud/gophercloud/v2/doc.go | 148 - .../gophercloud/v2/provider_client.go | 2 +- .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 123 + vendor/github.com/klauspost/compress/LICENSE | 304 ++ .../github.com/klauspost/compress/README.md | 721 +++ .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 +++ .../klauspost/compress/fse/decompress.go | 376 ++ 
.../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 229 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 +++ .../klauspost/compress/huff0/decompress.go | 1167 +++++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 ++++ .../compress/huff0/decompress_generic.go | 299 ++ .../klauspost/compress/huff0/huff0.go | 337 ++ .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 ++ .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 731 +++ .../klauspost/compress/zstd/blockenc.go | 909 ++++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 ++ .../klauspost/compress/zstd/decoder.go | 948 ++++ .../compress/zstd/decoder_options.go | 169 + .../klauspost/compress/zstd/dict.go | 565 +++ .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 560 +++ .../klauspost/compress/zstd/enc_better.go | 1252 +++++ .../klauspost/compress/zstd/enc_dfast.go | 1123 +++++ .../klauspost/compress/zstd/enc_fast.go | 891 ++++ .../klauspost/compress/zstd/encoder.go | 642 +++ .../compress/zstd/encoder_options.go | 339 ++ .../klauspost/compress/zstd/framedec.go | 415 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 ++ .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 73 + .../klauspost/compress/zstd/fse_encoder.go | 701 +++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 66 + .../compress/zstd/matchlen_generic.go | 33 + .../klauspost/compress/zstd/seqdec.go | 503 ++ .../klauspost/compress/zstd/seqdec_amd64.go | 394 ++ .../klauspost/compress/zstd/seqdec_amd64.s | 4151 +++++++++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 434 ++ .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 125 + 
vendor/github.com/kylelemons/godebug/LICENSE | 202 + .../kylelemons/godebug/diff/diff.go | 186 + vendor/github.com/onsi/gomega/CHANGELOG.md | 59 + vendor/github.com/onsi/gomega/gomega_dsl.go | 26 +- .../onsi/gomega/internal/async_assertion.go | 12 +- .../onsi/gomega/internal/duration_bundle.go | 17 +- .../github.com/onsi/gomega/internal/gomega.go | 8 + .../gomega/internal/polling_signal_error.go | 11 + .../onsi/gomega/matchers/be_empty_matcher.go | 16 +- .../onsi/gomega/matchers/consist_of.go | 32 +- .../matchers/contain_element_matcher.go | 239 +- .../matchers/contain_elements_matcher.go | 5 +- .../onsi/gomega/matchers/have_each_matcher.go | 40 +- .../gomega/matchers/have_exact_elements.go | 58 +- .../onsi/gomega/matchers/have_field.go | 36 +- .../onsi/gomega/matchers/have_key_matcher.go | 19 +- .../matchers/have_key_with_value_matcher.go | 26 +- .../onsi/gomega/matchers/have_len_matcher.go | 2 +- .../internal/miter/type_support_iter.go | 128 + .../internal/miter/type_support_noiter.go | 44 + .../bipartitegraph/bipartitegraphmatching.go | 7 + .../onsi/gomega/matchers/type_support.go | 13 + vendor/github.com/onsi/gomega/types/types.go | 2 + vendor/github.com/openshift/api/features.md | 4 +- .../hypershift/api/hypershift/register.go | 3 - .../api/hypershift/v1alpha1/clusterconfig.go | 15 - .../api/hypershift/v1alpha1/conditions.go | 118 - .../hypershift/api/hypershift/v1alpha1/doc.go | 14 - .../v1alpha1/endpointservice_types.go | 102 - .../hypershift/v1alpha1/groupversion_info.go | 40 - .../v1alpha1/hosted_controlplane.go | 362 -- .../v1alpha1/hostedcluster_types.go | 2222 --------- .../api/hypershift/v1alpha1/nodepool_types.go | 1034 ---- .../v1alpha1/zz_generated.deepcopy.go | 2599 ----------- .../api/hypershift/v1beta1/agent.go | 18 + .../hypershift/api/hypershift/v1beta1/aws.go | 864 ++++ .../api/hypershift/v1beta1/azure.go | 571 +++ ...certificatesigningrequestapproval_types.go | 3 +- .../api/hypershift/v1beta1/clusterconfig.go | 35 + .../v1beta1/controlplanecomponent_types.go | 96 + .../v1beta1/endpointservice_types.go | 5 +- .../hypershift/v1beta1/groupversion_info.go | 3 +- .../hypershift/v1beta1/hosted_controlplane.go | 19 +- .../v1beta1/hostedcluster_conditions.go | 19 + .../hypershift/v1beta1/hostedcluster_types.go | 2160 ++------- .../api/hypershift/v1beta1/ibmcloud.go | 68 + .../api/hypershift/v1beta1/kubevirt.go | 382 ++ .../hypershift/v1beta1/nodepool_conditions.go | 31 + .../api/hypershift/v1beta1/nodepool_types.go | 768 +-- .../api/hypershift/v1beta1/openstack.go | 420 ++ .../api/hypershift/v1beta1/powervs.go | 295 ++ .../v1beta1/zz_generated.deepcopy.go | 386 +- ..._generated.featuregated-crd-manifests.yaml | 231 + .../clustersizingconfiguration_types.go | 8 + .../v1alpha1/zz_generated.deepcopy.go | 10 + .../hypershift/api/util/ipnet/ipnet.go | 2 + .../hypershift/v1alpha1/aescbcspec.go | 51 - .../v1alpha1/agentnodepoolplatform.go | 42 - .../hypershift/v1alpha1/agentplatformspec.go | 38 - .../hypershift/v1alpha1/apiendpoint.go | 47 - .../v1alpha1/apiservernetworking.go | 62 - .../v1alpha1/awscloudproviderconfig.go | 56 - .../hypershift/v1alpha1/awskmsauthspec.go | 51 - .../hypershift/v1alpha1/awskmskeyentry.go | 38 - .../hypershift/v1alpha1/awskmsspec.go | 65 - .../v1alpha1/awsnodepoolplatform.go | 102 - .../hypershift/v1alpha1/awsplatformspec.go | 159 - .../hypershift/v1alpha1/awsplatformstatus.go | 38 - .../v1alpha1/awsresourcereference.go | 52 - .../hypershift/v1alpha1/awsresourcetag.go | 47 - .../hypershift/v1alpha1/awsrolecredentials.go | 56 - 
.../hypershift/v1alpha1/awsrolesref.go | 92 - .../hypershift/v1alpha1/awsserviceendpoint.go | 47 - .../hypershift/v1alpha1/azurekmskey.go | 56 - .../hypershift/v1alpha1/azurekmsspec.go | 47 - .../v1alpha1/azurenodepoolplatform.go | 110 - .../hypershift/v1alpha1/azureplatformspec.go | 114 - .../hypershift/v1alpha1/clusterautoscaling.go | 65 - .../v1alpha1/clusterconfiguration.go | 158 - .../v1alpha1/clusternetworkentry.go | 51 - .../hypershift/v1alpha1/clusternetworking.go | 120 - .../v1alpha1/clusterversionstatus.go | 84 - .../hypershift/v1alpha1/diagnostics.go | 47 - .../hypershift/v1alpha1/dnsspec.go | 65 - .../hypershift/v1alpha1/etcdspec.go | 60 - .../hypershift/v1alpha1/etcdtlsconfig.go | 42 - .../hypershift/v1alpha1/filter.go | 49 - .../hypershift/v1alpha1/hostedcluster.go | 218 - .../hypershift/v1alpha1/hostedclusterspec.go | 294 -- .../v1alpha1/hostedclusterstatus.go | 111 - .../v1alpha1/ibmcloudkmsauthspec.go | 60 - .../v1alpha1/ibmcloudkmskeyentry.go | 74 - .../hypershift/v1alpha1/ibmcloudkmsspec.go | 61 - .../v1alpha1/ibmcloudkmsunmanagedauthspec.go | 42 - .../v1alpha1/ibmcloudplatformspec.go | 42 - .../hypershift/v1alpha1/imagecontentsource.go | 49 - .../hypershift/v1alpha1/inplaceupgrade.go | 42 - .../hypershift/v1alpha1/kmsspec.go | 69 - .../v1alpha1/kubevirtcachingstrategy.go | 42 - .../hypershift/v1alpha1/kubevirtcompute.go | 61 - .../hypershift/v1alpha1/kubevirtdiskimage.go | 38 - .../kubevirtmanualstoragedriverconfig.go | 57 - .../hypershift/v1alpha1/kubevirtnetwork.go | 38 - .../v1alpha1/kubevirtnodepoolplatform.go | 98 - .../v1alpha1/kubevirtnodepoolstatus.go | 47 - .../v1alpha1/kubevirtpersistentvolume.go | 73 - .../v1alpha1/kubevirtplatformcredentials.go | 47 - .../v1alpha1/kubevirtplatformspec.go | 65 - .../hypershift/v1alpha1/kubevirtrootvolume.go | 68 - .../v1alpha1/kubevirtstorageclassmapping.go | 56 - .../v1alpha1/kubevirtstoragedriverspec.go | 51 - .../hypershift/v1alpha1/kubevirtvolume.go | 51 - .../kubevirtvolumesnapshotclassmapping.go | 56 - .../loadbalancerpublishingstrategy.go | 38 - .../v1alpha1/machinenetworkentry.go | 42 - .../v1alpha1/managedetcdstoragespec.go | 62 - .../hypershift/v1alpha1/nodepool.go | 218 - .../v1alpha1/nodepoolautoscaling.go | 47 - .../hypershift/v1alpha1/nodepoolcondition.go | 97 - .../hypershift/v1alpha1/nodepoolmanagement.go | 69 - .../hypershift/v1alpha1/nodepoolplatform.go | 96 - .../v1alpha1/nodepoolplatformstatus.go | 38 - .../hypershift/v1alpha1/nodepoolspec.go | 184 - .../hypershift/v1alpha1/nodepoolstatus.go | 70 - .../v1alpha1/nodeportpublishingstrategy.go | 47 - .../persistentvolumeetcdstoragespec.go | 51 - .../hypershift/v1alpha1/platformspec.go | 96 - .../hypershift/v1alpha1/platformstatus.go | 38 - .../v1alpha1/powervsnodepoolplatform.go | 97 - .../v1alpha1/powervsplatformspec.go | 150 - .../hypershift/v1alpha1/powervsvpc.go | 65 - .../hypershift/v1alpha1/release.go | 38 - .../hypershift/v1alpha1/replaceupgrade.go | 51 - .../hypershift/v1alpha1/rollingupdate.go | 51 - .../v1alpha1/routepublishingstrategy.go | 38 - .../v1alpha1/secretencryptionspec.go | 60 - .../v1alpha1/servicenetworkentry.go | 42 - .../v1alpha1/servicepublishingstrategy.go | 69 - .../servicepublishingstrategymapping.go | 85 - .../hypershift/v1alpha1/taint.go | 60 - .../hypershift/v1alpha1/unmanagedetcdspec.go | 47 - .../hypershift/v1alpha1/volume.go | 74 - .../hypershift/v1beta1/allocationpool.go | 47 + .../hypershift/v1beta1/awsnodepoolplatform.go | 9 + .../hypershift/v1beta1/awsplatformspec.go | 9 + .../hypershift/v1beta1/awssharedvpc.go | 47 + 
.../v1beta1/awssharedvpcrolesref.go | 47 + .../hypershift/v1beta1/azurekmsspec.go | 13 +- .../v1beta1/azuremarketplaceimage.go | 65 + .../hypershift/v1beta1/azurenodepoolosdisk.go | 69 + .../v1beta1/azurenodepoolplatform.go | 65 +- .../hypershift/v1beta1/azureplatformspec.go | 34 +- .../v1beta1/azureresourcemanagedidentities.go | 38 + .../hypershift/v1beta1/azurevmimage.go | 60 + .../v1beta1/controlplanemanagedidentities.go | 110 + .../hypershift/v1beta1/diagnostics.go | 18 +- .../hypershift/v1beta1/filterbyneutrontags.go | 77 + .../hypershift/v1beta1/hostedclusterspec.go | 11 + .../hypershift/v1beta1/hostedclusterstatus.go | 10 + .../v1beta1/hostedcontrolplanespec.go | 11 + .../hypershift/v1beta1/kubevirthostdevice.go | 47 + .../v1beta1/kubevirtnodepoolplatform.go | 26 +- .../managedazurekeyvault.go} | 26 +- .../hypershift/v1beta1/managedidentity.go | 47 + .../hypershift/v1beta1/networkfilter.go | 101 + .../networkparam.go} | 26 +- .../hypershift/v1beta1/nodepoolplatform.go | 23 +- .../v1beta1/openstackidentityreference.go | 47 + .../v1beta1/openstacknodepoolplatform.go | 56 + .../v1beta1/openstackplatformspec.go | 122 + .../placementoptions.go} | 22 +- .../hypershift/v1beta1/platformspec.go | 23 +- .../hypershift/v1beta1/routerfilter.go | 101 + .../hypershift/v1beta1/routerparam.go | 47 + .../hypershift/v1beta1/subnetfilter.go | 146 + .../hypershift/v1beta1/subnetparam.go | 47 + .../hypershift/v1beta1/subnetspec.go | 54 + .../v1beta1/usermanageddiagnostics.go | 38 + .../scheduling/v1alpha1/effects.go | 30 +- .../client/clientset/clientset/clientset.go | 13 - .../clientset/fake/clientset_generated.go | 7 - .../clientset/clientset/fake/register.go | 2 - .../clientset/clientset/scheme/register.go | 2 - .../typed/hypershift/v1alpha1/doc.go | 19 - .../typed/hypershift/v1alpha1/fake/doc.go | 19 - .../v1alpha1/fake/fake_hostedcluster.go | 188 - .../v1alpha1/fake/fake_hypershift_client.go | 43 - .../hypershift/v1alpha1/fake/fake_nodepool.go | 188 - .../v1alpha1/generated_expansion.go | 22 - .../hypershift/v1alpha1/hostedcluster.go | 255 - .../hypershift/v1alpha1/hypershift_client.go | 111 - .../typed/hypershift/v1alpha1/nodepool.go | 255 - .../informers/externalversions/generic.go | 7 - .../externalversions/hypershift/interface.go | 8 - .../hypershift/v1alpha1/hostedcluster.go | 89 - .../hypershift/v1alpha1/interface.go | 51 - .../hypershift/v1alpha1/nodepool.go | 89 - .../v1alpha1/expansion_generated.go | 34 - .../hypershift/v1alpha1/hostedcluster.go | 98 - .../listers/hypershift/v1alpha1/nodepool.go | 98 - .../prometheus/client_golang/NOTICE | 5 - .../internal/github.com/golang/gddo/LICENSE | 27 + .../golang/gddo/httputil/header/header.go | 145 + .../golang/gddo/httputil/negotiate.go | 36 + .../collectors/go_collector_latest.go | 4 +- .../client_golang/prometheus/go_collector.go | 55 +- .../prometheus/go_collector_latest.go | 19 +- .../client_golang/prometheus/histogram.go | 268 +- .../internal/go_collector_options.go | 2 + .../client_golang/prometheus/metric.go | 2 +- .../prometheus/process_collector.go | 29 +- .../prometheus/process_collector_other.go | 14 + .../prometheus/promhttp/delegator.go | 6 + .../client_golang/prometheus/promhttp/http.go | 113 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 42 + .../testutil/promlint/validation.go | 1 + .../validations/duplicate_validations.go | 37 + .../validations/generic_name_validations.go | 20 +- .../prometheus/testutil/testutil.go | 108 +- .../client_golang/prometheus/vec.go | 2 +- 
.../prometheus/common/expfmt/decode.go | 14 +- .../prometheus/common/expfmt/encode.go | 24 +- .../prometheus/common/expfmt/expfmt.go | 76 +- .../common/expfmt/openmetrics_create.go | 2 +- .../prometheus/common/expfmt/text_create.go | 4 +- .../prometheus/common/expfmt/text_parse.go | 162 +- .../prometheus/common/model/labels.go | 27 +- .../common/model/labelset_string.go | 2 - .../common/model/labelset_string_go120.go | 39 - .../prometheus/common/model/metric.go | 31 +- .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- .../etcd/client/pkg/v3/logutil/zap.go | 2 +- .../etcd/client/pkg/v3/transport/listener.go | 50 +- vendor/go.etcd.io/etcd/client/v3/watch.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.golangci.yml | 15 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 168 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 26 +- vendor/go.opentelemetry.io/otel/Makefile | 24 +- vendor/go.opentelemetry.io/otel/README.md | 34 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 12 +- .../go.opentelemetry.io/otel/attribute/set.go | 40 +- .../otel/baggage/baggage.go | 150 +- .../go.opentelemetry.io/otel/codes/codes.go | 2 +- vendor/go.opentelemetry.io/otel/doc.go | 2 + .../otel/internal/global/instruments.go | 14 +- .../otel/internal/global/meter.go | 382 +- .../otel/internal/global/trace.go | 8 +- .../otel/internal/rawhelpers.go | 12 +- .../otel/metric/asyncfloat64.go | 2 +- .../otel/metric/asyncint64.go | 2 +- .../otel/metric/instrument.go | 2 +- .../go.opentelemetry.io/otel/metric/meter.go | 13 + vendor/go.opentelemetry.io/otel/renovate.json | 8 + .../otel/semconv/internal/http.go | 2 +- .../go.opentelemetry.io/otel/trace/context.go | 2 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- .../otel/trace/provider.go | 59 + vendor/go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 249 - .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 10 + .../otel/verify_examples.sh | 74 - .../otel/verify_released_changelog.sh | 42 + vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 17 +- .../x/crypto/internal/poly1305/mac_noasm.go | 2 +- .../{sum_ppc64le.go => sum_ppc64x.go} | 2 +- .../poly1305/{sum_ppc64le.s => sum_ppc64x.s} | 30 +- vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/net/html/doc.go | 7 +- vendor/golang.org/x/net/html/iter.go | 56 + vendor/golang.org/x/net/html/node.go | 4 + .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/config.go | 122 + vendor/golang.org/x/net/http2/config_go124.go | 61 + .../x/net/http2/config_pre_go124.go | 16 + vendor/golang.org/x/net/http2/http2.go | 53 +- vendor/golang.org/x/net/http2/server.go | 215 +- vendor/golang.org/x/net/http2/transport.go | 385 +- vendor/golang.org/x/net/http2/unencrypted.go | 32 + vendor/golang.org/x/net/http2/write.go | 10 + .../golang.org/x/net/websocket/websocket.go | 2 +- vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/README.md | 15 +- vendor/golang.org/x/oauth2/token.go | 7 + .../golang.org/x/sys/cpu/asm_darwin_x86_gc.s | 17 + vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go | 61 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 4 +- .../x/sys/cpu/{cpu_x86.s => cpu_gc_x86.s} | 2 +- vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 6 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_other_x86.go | 11 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 6 +- 
.../x/sys/cpu/syscall_darwin_x86_gc.go | 98 + vendor/golang.org/x/sys/unix/README.md | 2 +- vendor/golang.org/x/sys/unix/ioctl_linux.go | 96 + vendor/golang.org/x/sys/unix/mkerrors.sh | 16 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 2 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 64 +- .../x/sys/unix/syscall_linux_arm64.go | 2 + .../x/sys/unix/syscall_linux_loong64.go | 2 + .../x/sys/unix/syscall_linux_riscv64.go | 2 + .../x/sys/unix/syscall_zos_s390x.go | 104 +- .../golang.org/x/sys/unix/vgetrandom_linux.go | 13 + .../x/sys/unix/vgetrandom_unsupported.go | 11 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 35 +- .../x/sys/unix/zerrors_linux_386.go | 19 + .../x/sys/unix/zerrors_linux_amd64.go | 19 + .../x/sys/unix/zerrors_linux_arm.go | 19 + .../x/sys/unix/zerrors_linux_arm64.go | 19 + .../x/sys/unix/zerrors_linux_loong64.go | 19 + .../x/sys/unix/zerrors_linux_mips.go | 19 + .../x/sys/unix/zerrors_linux_mips64.go | 19 + .../x/sys/unix/zerrors_linux_mips64le.go | 19 + .../x/sys/unix/zerrors_linux_mipsle.go | 19 + .../x/sys/unix/zerrors_linux_ppc.go | 19 + .../x/sys/unix/zerrors_linux_ppc64.go | 19 + .../x/sys/unix/zerrors_linux_ppc64le.go | 19 + .../x/sys/unix/zerrors_linux_riscv64.go | 19 + .../x/sys/unix/zerrors_linux_s390x.go | 19 + .../x/sys/unix/zerrors_linux_sparc64.go | 19 + .../golang.org/x/sys/unix/zsyscall_linux.go | 27 +- .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 2 +- .../x/sys/unix/zsysnum_linux_loong64.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 208 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 6 + .../golang.org/x/sys/windows/dll_windows.go | 2 +- .../x/sys/windows/syscall_windows.go | 34 +- .../golang.org/x/sys/windows/types_windows.go | 126 + .../x/sys/windows/zsyscall_windows.go | 53 + vendor/golang.org/x/term/README.md | 11 +- .../x/text/internal/catmsg/codec.go | 2 +- vendor/golang.org/x/time/LICENSE | 4 +- vendor/golang.org/x/time/rate/rate.go | 17 +- .../protobuf/encoding/protojson/decode.go | 2 +- .../protobuf/encoding/protojson/encode.go | 4 +- .../encoding/protojson/well_known_types.go | 6 +- .../protobuf/internal/descopts/options.go | 20 +- .../editiondefaults/editions_defaults.binpb | Bin 93 -> 99 bytes .../internal/editionssupport/editions.go | 7 +- .../protobuf/internal/filedesc/desc.go | 19 + .../protobuf/internal/filedesc/desc_init.go | 2 + .../protobuf/internal/filedesc/desc_lazy.go | 2 + .../protobuf/internal/filedesc/editions.go | 6 +- .../protobuf/internal/genid/doc.go | 2 +- .../internal/genid/go_features_gen.go | 32 +- .../protobuf/internal/genid/map_entry.go | 2 +- .../protobuf/internal/genid/wrappers.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 11 +- .../protobuf/internal/impl/codec_field.go | 3 + .../protobuf/internal/impl/codec_message.go | 3 + .../protobuf/internal/impl/codec_reflect.go | 210 - .../protobuf/internal/impl/codec_unsafe.go | 3 - .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/encode.go | 2 +- .../protobuf/internal/impl/equal.go | 224 + .../internal/impl/legacy_extension.go | 1 + .../protobuf/internal/impl/message.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 215 - .../protobuf/internal/impl/pointer_unsafe.go | 3 - .../protobuf/internal/strs/strings_pure.go | 28 - .../internal/strs/strings_unsafe_go120.go | 3 +- .../internal/strs/strings_unsafe_go121.go | 3 +- .../protobuf/internal/version/version.go | 2 +- .../google.golang.org/protobuf/proto/equal.go | 9 + 
.../protobuf/proto/extension.go | 71 + .../protobuf/reflect/protodesc/desc.go | 12 +- .../protobuf/reflect/protodesc/desc_init.go | 4 + .../protobuf/reflect/protodesc/editions.go | 7 +- .../protobuf/reflect/protoreflect/methods.go | 10 + .../reflect/protoreflect/value_pure.go | 60 - .../protoreflect/value_unsafe_go120.go | 3 +- .../protoreflect/value_unsafe_go121.go | 3 +- .../protobuf/runtime/protoiface/methods.go | 18 + .../types/descriptorpb/descriptor.pb.go | 748 +-- .../types/gofeaturespb/go_features.pb.go | 155 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 24 +- .../protobuf/types/known/emptypb/empty.pb.go | 24 +- .../types/known/fieldmaskpb/field_mask.pb.go | 24 +- .../types/known/structpb/struct.pb.go | 110 +- .../types/known/timestamppb/timestamp.pb.go | 24 +- .../types/known/wrapperspb/wrappers.pb.go | 200 +- vendor/k8s.io/apiserver/pkg/server/config.go | 2 +- .../apiserver/pkg/server/options/tracing.go | 8 + .../storage/resource_encoding_config.go | 19 +- .../pkg/server/storage/storage_factory.go | 2 +- .../kube-openapi/pkg/util/proto/document.go | 2 +- vendor/k8s.io/utils/lru/lru.go | 12 + vendor/modules.txt | 123 +- .../structured-merge-diff/v4/fieldpath/set.go | 277 ++ .../structured-merge-diff/v4/merge/update.go | 68 +- .../structured-merge-diff/v4/typed/compare.go | 10 + .../structured-merge-diff/v4/typed/parser.go | 2 +- .../v4/value/reflectcache.go | 63 +- .../structured-merge-diff/v4/value/value.go | 2 +- 542 files changed, 44969 insertions(+), 22982 deletions(-) delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go create mode 
100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.proto delete mode 100644 vendor/github.com/gophercloud/gophercloud/v2/doc.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 
vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/kylelemons/godebug/LICENSE create mode 100644 vendor/github.com/kylelemons/godebug/diff/diff.go create mode 100644 vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go create mode 100644 vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/register.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/clusterconfig.go delete mode 
100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/conditions.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/doc.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/endpointservice_types.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/groupversion_info.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hosted_controlplane.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hostedcluster_types.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/nodepool_types.go delete mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go create mode 100644 vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/aescbcspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentnodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiendpoint.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiservernetworking.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awscloudproviderconfig.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsauthspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmskeyentry.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsnodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcereference.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcetag.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolecredentials.go delete mode 
100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolesref.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsserviceendpoint.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmskey.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmsspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurenodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azureplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterautoscaling.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworkentry.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworking.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterversionstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/diagnostics.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/dnsspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdtlsconfig.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/filter.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedcluster.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsauthspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmskeyentry.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsunmanagedauthspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/imagecontentsource.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/inplaceupgrade.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kmsspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcachingstrategy.go delete mode 100644 
vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcompute.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtdiskimage.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtmanualstoragedriverconfig.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnetwork.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtpersistentvolume.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformcredentials.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtrootvolume.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstorageclassmapping.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstoragedriverspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolume.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolumesnapshotclassmapping.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/loadbalancerpublishingstrategy.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/machinenetworkentry.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdstoragespec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepool.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolautoscaling.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolcondition.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolmanagement.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatformstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodeportpublishingstrategy.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/persistentvolumeetcdstoragespec.go delete mode 100644 
vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformstatus.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsnodepoolplatform.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsplatformspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsvpc.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/release.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/replaceupgrade.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/rollingupdate.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/routepublishingstrategy.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/secretencryptionspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicenetworkentry.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategy.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategymapping.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/taint.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/unmanagedetcdspec.go delete mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/volume.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/allocationpool.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpcrolesref.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azuremarketplaceimage.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolosdisk.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/filterbyneutrontags.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirthostdevice.go rename vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/{v1alpha1/kubeconfigsecretref.go => v1beta1/managedazurekeyvault.go} (51%) create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedidentity.go create mode 100644 
vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkfilter.go rename vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/{v1alpha1/powervsresourcereference.go => v1beta1/networkparam.go} (51%) create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackidentityreference.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go rename vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/{v1alpha1/managedetcdspec.go => v1beta1/placementoptions.go} (50%) create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerfilter.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerparam.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetparam.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetspec.go create mode 100644 vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/usermanageddiagnostics.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/doc.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/doc.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hostedcluster.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hypershift_client.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_nodepool.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/generated_expansion.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hostedcluster.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hypershift_client.go delete mode 100644 vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/nodepool.go delete mode 100644 vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/hostedcluster.go delete mode 100644 vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/interface.go delete mode 100644 vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/nodepool.go delete mode 100644 vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/expansion_generated.go delete mode 100644 vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/hostedcluster.go delete mode 100644 vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/nodepool.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE create mode 100644 
vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh rename vendor/golang.org/x/crypto/internal/poly1305/{sum_ppc64le.go => sum_ppc64x.go} (95%) rename vendor/golang.org/x/crypto/internal/poly1305/{sum_ppc64le.s => sum_ppc64x.s} (89%) create mode 100644 vendor/golang.org/x/net/html/iter.go create mode 100644 vendor/golang.org/x/net/http2/config.go create mode 100644 vendor/golang.org/x/net/http2/config_go124.go create mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go create mode 100644 vendor/golang.org/x/net/http2/unencrypted.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go rename vendor/golang.org/x/sys/cpu/{cpu_x86.s => cpu_gc_x86.s} (94%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go create mode 100644 vendor/golang.org/x/sys/unix/vgetrandom_linux.go create mode 100644 vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go diff --git a/go.mod b/go.mod index ca21a900f..e8d6f53ec 100644 --- a/go.mod +++ b/go.mod @@ -5,30 +5,30 @@ go 1.22.0 toolchain go1.22.5 require ( - github.com/aws/aws-sdk-go v1.52.6 + github.com/aws/aws-sdk-go v1.55.5 github.com/evanphx/json-patch v5.9.0+incompatible github.com/google/go-cmp v0.6.0 - github.com/gophercloud/gophercloud/v2 v2.1.1 + github.com/gophercloud/gophercloud/v2 v2.2.0 github.com/gophercloud/utils/v2 v2.0.0-20240812072210-8ce1fc0f2894 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 - github.com/onsi/gomega v1.33.1 - github.com/openshift/api v0.0.0-20241121204516-053bb8a33f6d + github.com/onsi/gomega v1.36.0 + github.com/openshift/api v0.0.0-20241126141851-807d6dfccb05 github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f - github.com/openshift/hypershift v0.1.39 - github.com/openshift/hypershift/api v0.0.0-20240725153211-8b880bdd20d1 + github.com/openshift/hypershift v0.1.51 + github.com/openshift/hypershift/api v0.0.0-20241126143550-da24e17fcccb github.com/openshift/library-go v0.0.0-20241120135057-fc703a7407c9 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/spf13/cobra v1.8.1 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.31.1 - k8s.io/apiextensions-apiserver 
v0.31.1 - k8s.io/apimachinery v0.31.1 - k8s.io/client-go v0.31.1 - k8s.io/component-base v0.31.1 + k8s.io/api v0.31.3 + k8s.io/apiextensions-apiserver v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/component-base v0.31.3 k8s.io/klog/v2 v2.130.1 - k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 + k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 sigs.k8s.io/kustomize/kyaml v0.17.1 sigs.k8s.io/yaml v1.4.0 ) @@ -47,7 +47,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/felixge/fgprof v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -60,10 +60,11 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/cel-go v0.20.1 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -71,59 +72,62 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.21.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect - go.etcd.io/etcd/client/v3 v3.5.14 // indirect + go.etcd.io/etcd/api/v3 v3.5.15 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect + go.etcd.io/etcd/client/v3 v3.5.15 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect - go.opentelemetry.io/otel/metric 
v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/exp v0.0.0-20240707233637-46b078467d37 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/crypto v0.29.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sync v0.9.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/time v0.8.0 // indirect google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.31.1 // indirect - k8s.io/kms v0.31.1 // indirect - k8s.io/kube-aggregator v0.31.1 // indirect - k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect + k8s.io/apiserver v0.31.3 // indirect + k8s.io/kms v0.31.3 // indirect + k8s.io/kube-aggregator v0.31.3 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect ) diff --git a/go.sum b/go.sum index 8b4cfda2d..b37b3325a 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.52.6 h1:nw1AMg0wIj5tTnI89KaDe9G5aISqXm4KJEe1DfNbFvA= -github.com/aws/aws-sdk-go v1.52.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -43,8 +43,8 @@ 
github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= @@ -73,8 +73,8 @@ github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk= github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -83,8 +83,8 @@ github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -93,16 +93,16 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= 
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gophercloud/gophercloud/v2 v2.1.1 h1:KUeVTUoq6um/CijR+hl1JRZ35SXRY62LiYUbgp4qqKw= -github.com/gophercloud/gophercloud/v2 v2.1.1/go.mod h1:f2hMRC7Kakbv5vM7wSGHrIPZh6JZR60GVHryJlF/K44= +github.com/gophercloud/gophercloud/v2 v2.2.0 h1:STqqnSXuhcg1OPBOZ14z6JDm8fKIN13H2bJg6bBuHp8= +github.com/gophercloud/gophercloud/v2 v2.2.0/go.mod h1:f2hMRC7Kakbv5vM7wSGHrIPZh6JZR60GVHryJlF/K44= github.com/gophercloud/utils/v2 v2.0.0-20240812072210-8ce1fc0f2894 h1:KgYK/Mf71IdN7+sq2cVmY8Jtumi26GxahN7nlV0ETBQ= github.com/gophercloud/utils/v2 v2.0.0-20240812072210-8ce1fc0f2894/go.mod h1:IXS9MKM8YBsr9G2xhnMAuiCOaNxmghQFQGi1p68+No0= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -129,12 +129,16 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 h1:qS4r4ljINLWKJ9m9Ge3Q3sGZ/eIoDVDT2RhAdQFHb1k= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0/go.mod h1:oGXx2XTEzs9ikW2V6IC1dD8trgjRsS/Mvc2JRiC618Y= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -149,20 +153,20 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod 
h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/openshift/api v0.0.0-20241121204516-053bb8a33f6d h1:T/WwU+n66JHVuHIdC9yYAdiFLpUFgm5w4l8mvZbu+H8= -github.com/openshift/api v0.0.0-20241121204516-053bb8a33f6d/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= +github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/api v0.0.0-20241126141851-807d6dfccb05 h1:aRX4ky9M9r/1KyROGkvh3AK3QWBf4HZtyNdY8aDueP4= +github.com/openshift/api v0.0.0-20241126141851-807d6dfccb05/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No= github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f h1:FRc0bVNWprihWS0GqQWzb3dY4dkCwpOP3mDw5NwSoR4= github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f/go.mod h1:KiZi2mJRH1TOJ3FtBDYS6YvUL30s/iIXaGSUrSa36mo= -github.com/openshift/hypershift v0.1.39 h1:0LEH7srbBFh/0N+mNvdDhiGQAsbtHt647bj4XJJPbHg= -github.com/openshift/hypershift v0.1.39/go.mod h1:q/TuRvv0WqAjpvCUX7jFRGtXP0aZn4x1Bg0ikiFx2Ds= -github.com/openshift/hypershift/api v0.0.0-20240725153211-8b880bdd20d1 h1:jOikb/zB0IT79YTeepbJQ3VRcUF/fVIhfcWNTKxnyBQ= -github.com/openshift/hypershift/api v0.0.0-20240725153211-8b880bdd20d1/go.mod h1:IDXXroBJeH+nIHkA17S3Yq2QDQg02tMnCWOXoyZVOLY= +github.com/openshift/hypershift v0.1.51 h1:bVQ7tbWv9PmKo7ojc6WRGtvsCvnitDrkMGoOiXA2rLY= +github.com/openshift/hypershift v0.1.51/go.mod h1:3pNwg+aBau/tqyuFNqZLAxVU0gDmutDRgHOyZ3QRqH0= +github.com/openshift/hypershift/api v0.0.0-20241126143550-da24e17fcccb h1:xVYdpsmpGaMVh/qWLIU6MUSs9XhRq9hw7MliXdQSWC8= +github.com/openshift/hypershift/api v0.0.0-20241126143550-da24e17fcccb/go.mod h1:3UlUlywmXBCEMF3GACTvMAOvv2lU5qzUDvTYFXeGbKU= github.com/openshift/library-go v0.0.0-20241120135057-fc703a7407c9 h1:bwIqO3LDkumwfDKTMRzixNHKUqU7yaKTTAKwENi6JOY= github.com/openshift/library-go v0.0.0-20241120135057-fc703a7407c9/go.mod h1:9B1MYPoLtP9tqjWxcbUNVpwxy68zOH/3EIP6c31dAM0= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= @@ -175,12 +179,12 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring 
v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= @@ -221,14 +225,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0= -go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU= -go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ= -go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI= +go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk= +go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM= +go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA= +go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU= go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8= go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= -go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg= -go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk= +go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4= +go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU= go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M= go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA= @@ -239,18 +243,18 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= 
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -262,25 +266,25 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/exp v0.0.0-20240707233637-46b078467d37 h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= -golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -288,22 +292,22 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -316,8 +320,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -335,28 +339,28 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.1 h1:Sars5ejQDCRBY5f7R3QFHdqN3s61nhkpaX8/k1iEw1c= -k8s.io/apiserver v0.31.1/go.mod h1:lzDhpeToamVZJmmFlaLwdYZwd7zB+WYRYIboqA1kGxM= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= -k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= -k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= +k8s.io/api v0.31.3 
h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE= +k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.3 h1:+1oHTtCB+OheqFEz375D0IlzHZ5VeQKX1KGXnx+TTuY= +k8s.io/apiserver v0.31.3/go.mod h1:PrxVbebxrxQPFhJk4powDISIROkNMKHibTg9lTRQ0Qg= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.31.1 h1:cGLyV3cIwb0ovpP/jtyIe2mEuQ/MkbhmeBF2IYCA9Io= -k8s.io/kms v0.31.1/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= -k8s.io/kube-aggregator v0.31.1 h1:vrYBTTs3xMrpiEsmBjsLETZE9uuX67oQ8B3i1BFfMPw= -k8s.io/kube-aggregator v0.31.1/go.mod h1:+aW4NX50uneozN+BtoCxI4g7ND922p8Wy3tWKFDiWVk= -k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f h1:2sXuKesAYbRHxL3aE2PN6zX/gcJr22cjrsej+W784Tc= -k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kms v0.31.3 h1:XCFmiJn5CCKs8xoOLpCmu42Ubm/KW85wNHybGFcSAYc= +k8s.io/kms v0.31.3/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= +k8s.io/kube-aggregator v0.31.3 h1:DqHPdTglJHgOfB884AaroyxrML/aL82ASYOh65m7MSk= +k8s.io/kube-aggregator v0.31.3/go.mod h1:Kx59Xjnf0SnY47qf9Or++4y3XCHQ3kR0xk1Di6KFiFU= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= @@ -365,7 +369,7 @@ sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3 
h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A= +sigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 0b94fe7fe..c3516e018 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -74,7 +74,9 @@ const ( ) // AWS ISOE (Europe) partition's regions. -const () +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) // AWS ISOF partition's regions. const () @@ -244,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -822,30 +817,60 @@ var awsPartition = partition{ }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -855,6 +880,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -864,6 +898,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -1060,21 +1097,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -4567,91 +4616,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -4857,6 +4821,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4873,6 +4845,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, endpointKey{ Region: "bedrock-eu-west-3", }: endpoint{ @@ -4881,6 +4861,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "bedrock-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4929,6 +4917,14 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4945,6 +4941,14 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-west-3", }: endpoint{ @@ -4953,6 +4957,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: 
"bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4969,6 +4981,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-runtime-us-east-1", }: endpoint{ @@ -4985,6 +5005,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-us-east-1", }: endpoint{ @@ -5001,15 +5029,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -5034,6 +5071,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -5064,6 +5104,12 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5278,69 +5324,262 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, 
+ endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: 
"cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -5348,78 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, endpointKey{ Region: "ca-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + 
}: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -5477,51 +5854,123 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: 
"us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -6949,6 +7398,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6979,6 +7431,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7094,6 +7549,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -7124,6 +7582,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9042,6 +9503,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9245,9 +9712,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9272,6 +9751,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -12577,6 +13074,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14769,6 +15269,18 @@ var awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15420,13 +15932,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -15549,6 +16054,9 @@ var awsPartition = partition{ endpointKey{ 
Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17453,12 +17961,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -19334,6 +19857,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20793,6 +21319,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20811,6 +21340,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21669,6 +22213,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21984,6 +22531,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22361,6 +22911,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -23004,91 +23562,490 @@ var awsPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-4", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + 
Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + 
endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "sa-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -23534,6 +24491,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -24077,6 +25042,9 @@ var awsPartition = partition{ }, "quicksight": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -24092,15 +25060,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: 
endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25148,9 +26128,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25205,6 +26191,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -27232,6 +28224,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -30162,6 +31203,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31528,6 +32572,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31782,6 +32829,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32579,41 +33644,115 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -32662,39 +33801,87 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", 
}: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, }, }, "thinclient": service{ @@ -33320,6 +34507,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -33678,6 +34880,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -33699,6 +34907,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -33708,6 +34919,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -35821,6 +37035,21 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36044,16 +37273,6 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36107,9 +37326,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -37168,10 +38399,28 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, 
}, }, "pipes": service{ @@ -37288,6 +38537,9 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "resource-groups": service{ @@ -38758,16 +40010,12 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{ - Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-east-1-fips", @@ -38779,16 +40027,12 @@ var awsusgovPartition = partition{ }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{ - Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1-fips", @@ -38820,16 +40064,6 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -38880,6 +40114,22 @@ var awsusgovPartition = partition{ }, "bedrock": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "bedrock-runtime-us-gov-west-1", }: endpoint{ @@ -38980,21 +40230,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -39724,20 +40998,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: 
"directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, @@ -41724,6 +43018,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42495,12 +43845,76 @@ var awsusgovPartition = partition{ }, "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: 
"us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -43465,6 +44879,46 @@ var awsusgovPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44468,21 +45922,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -44943,6 +46421,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "appconfig": service{ @@ -45646,42 +47127,12 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, }, }, "rbin": service{ @@ -45726,37 +47177,10 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-iso-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -45765,16 +47189,7 @@ var awsisoPartition = partition{ endpointKey{ Region: "rds.us-iso-west-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -45787,12 +47202,12 @@ var awsisoPartition = partition{ Region: "us-iso-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -45805,12 +47220,12 @@ var awsisoPartition = partition{ Region: "us-iso-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-west-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -45821,40 +47236,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -46342,6 +47737,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -46769,24 +48171,9 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, }, }, "rbin": service{ @@ -46813,28 +48200,10 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-isob-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -46847,12 +48216,12 @@ var awsisobPartition = partition{ Region: "us-isob-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", }, endpointKey{ Region: "us-isob-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -46863,22 +48232,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, @@ -47207,7 +48566,11 @@ var awsisoePartition = partition{ SignatureVersions: []string{"v4"}, }, }, - Regions: regions{}, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, Services: services{}, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 4601f883c..992ed0464 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro s := a.Expected.(int) result = s == req.HTTPResponse.StatusCode case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) + switch ex := a.Expected.(type) { + case string: + if aerr, ok := err.(awserr.Error); ok { + result = 
aerr.Code() == ex + } + case bool: + if ex { + result = err != nil + } else { + result = err == nil + } } default: waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 47a7a4b7c..d15e3c84c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.52.6" +const SDKVersion = "1.55.5" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 058334053..2ca0b19db 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 4f0147de2..38db957a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -58,7 +58,7 @@ func (c *EC2) AcceptAddressTransferRequest(input *AcceptAddressTransferInput) (r // // Accepts an Elastic IP address transfer. For more information, see Accept // a transferred Elastic IP address (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#using-instance-addressing-eips-transfer-accept) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -681,7 +681,7 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // that you have brought to Amazon Web Services for use with your Amazon Web // Services resources using bring your own IP addresses (BYOIP). For more information, // see Bring Your Own IP Addresses (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // If you release an Elastic IP address, you might be able to recover it. You // cannot recover an Elastic IP address that you released after it is allocated @@ -689,7 +689,7 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // IP address that you released, specify it in this operation. // // For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. 
// // You can allocate a carrier IP address which is a public IP address from a // telecommunication carrier, to a network interface which resides in a subnet @@ -1003,18 +1003,15 @@ func (c *EC2) AssignIpv6AddressesRequest(input *AssignIpv6AddressesInput) (req * // of IPv6 addresses to be automatically assigned from within the subnet's IPv6 // CIDR block range. You can assign as many IPv6 addresses to a network interface // as you can assign private IPv4 addresses, and the limit varies per instance -// type. For information, see IP Addresses Per Network Interface Per Instance -// Type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) -// in the Amazon Elastic Compute Cloud User Guide. +// type. // // You must specify either the IPv6 addresses or the IPv6 address count in the // request. // // You can optionally use Prefix Delegation on the network interface. You must // specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation -// count. For information, see Assigning prefixes to Amazon EC2 network interfaces -// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) -// in the Amazon Elastic Compute Cloud User Guide. +// count. For information, see Assigning prefixes to network interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1093,11 +1090,9 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp // You can specify one or more specific secondary IP addresses, or you can specify // the number of secondary IP addresses to be automatically assigned within // the subnet's CIDR block range. The number of secondary IP addresses that -// you can assign to an instance varies by instance type. For information about -// instance types, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) -// in the Amazon Elastic Compute Cloud User Guide. For more information about -// Elastic IP addresses, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) -// in the Amazon Elastic Compute Cloud User Guide. +// you can assign to an instance varies by instance type. For more information +// about Elastic IP addresses, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// in the Amazon EC2 User Guide. // // When you move a secondary private IP address to another network interface, // any Elastic IP address that is associated with the IP address is also moved. @@ -1110,9 +1105,8 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp // // You can optionally use Prefix Delegation on the network interface. You must // specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation -// count. For information, see Assigning prefixes to Amazon EC2 network interfaces -// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) -// in the Amazon Elastic Compute Cloud User Guide. +// count. For information, see Assigning prefixes to network interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1185,8 +1179,8 @@ func (c *EC2) AssignPrivateNatGatewayAddressRequest(input *AssignPrivateNatGatew // AssignPrivateNatGatewayAddress API operation for Amazon Elastic Compute Cloud. // -// Assigns one or more private IPv4 addresses to a private NAT gateway. For -// more information, see Work with NAT gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-working-with) +// Assigns private IPv4 addresses to a private NAT gateway. For more information, +// see Work with NAT gateways (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-working-with) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1446,7 +1440,7 @@ func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req // its DHCP lease. You can explicitly renew the lease using the operating system // on the instance. // -// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// For more information, see DHCP option sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2391,8 +2385,8 @@ func (c *EC2) AssociateTrunkInterfaceRequest(input *AssociateTrunkInterfaceInput // // Associates a branch network interface with a trunk network interface. // -// Before you create the association, run the create-network-interface (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html) -// command and set --interface-type to trunk. You must also create a network +// Before you create the association, use CreateNetworkInterface (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html) +// command and set the interface type to trunk. You must also create a network // interface for each branch network interface that you want to associate with // the trunk network interface. // @@ -3922,7 +3916,7 @@ func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstanc // Cancels the specified Reserved Instance listing in the Reserved Instance // Marketplace. // -// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// For more information, see Sell in the Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4412,10 +4406,9 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // When copying snapshots to a Region, copies of encrypted EBS snapshots remain // encrypted. Copies of unencrypted snapshots remain unencrypted, unless you // enable encryption for the snapshot copy operation. By default, encrypted -// snapshot copies use the default Key Management Service (KMS) KMS key; however, -// you can specify a different KMS key. To copy an encrypted snapshot that has -// been shared from another account, you must have permissions for the KMS key -// used to encrypt the snapshot. +// snapshot copies use the default KMS key; however, you can specify a different +// KMS key. 
To copy an encrypted snapshot that has been shared from another +// account, you must have permissions for the KMS key used to encrypt the snapshot. // // Snapshots copied to an Outpost are encrypted by default using the default // encryption key for the Region, or a different key that you specify in the @@ -5295,7 +5288,7 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // DHCP options. // // The following are the individual DHCP options you can specify. For more information, -// see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// see DHCP option sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon VPC User Guide. // // - domain-name - If you're using AmazonProvidedDNS in us-east-1, specify @@ -5321,7 +5314,7 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // // - netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend // that you specify 2. Broadcast and multicast are not supported. For more -// information about NetBIOS node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). +// information about NetBIOS node types, see RFC 2132 (https://www.ietf.org/rfc/rfc2132.txt). // // - ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, // or years) for how frequently a running instance with an IPv6 assigned @@ -5566,7 +5559,7 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // Flow log data for a monitored network interface is recorded as flow log records, // which are log events consisting of fields that describe the traffic flow. // For more information, see Flow log records (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // When publishing to CloudWatch Logs, flow log records are published to a log // group, and each network interface has a unique log stream in the log group. @@ -5575,7 +5568,7 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // specified bucket. // // For more information, see VPC Flow Logs (https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6179,6 +6172,83 @@ func (c *EC2) CreateIpamWithContext(ctx aws.Context, input *CreateIpamInput, opt return out, req.Send() } +const opCreateIpamExternalResourceVerificationToken = "CreateIpamExternalResourceVerificationToken" + +// CreateIpamExternalResourceVerificationTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateIpamExternalResourceVerificationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateIpamExternalResourceVerificationToken for more information on using the CreateIpamExternalResourceVerificationToken +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateIpamExternalResourceVerificationTokenRequest method. +// req, resp := client.CreateIpamExternalResourceVerificationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateIpamExternalResourceVerificationToken +func (c *EC2) CreateIpamExternalResourceVerificationTokenRequest(input *CreateIpamExternalResourceVerificationTokenInput) (req *request.Request, output *CreateIpamExternalResourceVerificationTokenOutput) { + op := &request.Operation{ + Name: opCreateIpamExternalResourceVerificationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateIpamExternalResourceVerificationTokenInput{} + } + + output = &CreateIpamExternalResourceVerificationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateIpamExternalResourceVerificationToken API operation for Amazon Elastic Compute Cloud. +// +// Create a verification token. A verification token is an Amazon Web Services-generated +// random value that you can use to prove ownership of an external resource. +// For example, you can use a verification token to validate that you control +// a public IP address range when you bring an IP address range to Amazon Web +// Services (BYOIP). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateIpamExternalResourceVerificationToken for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateIpamExternalResourceVerificationToken +func (c *EC2) CreateIpamExternalResourceVerificationToken(input *CreateIpamExternalResourceVerificationTokenInput) (*CreateIpamExternalResourceVerificationTokenOutput, error) { + req, out := c.CreateIpamExternalResourceVerificationTokenRequest(input) + return out, req.Send() +} + +// CreateIpamExternalResourceVerificationTokenWithContext is the same as CreateIpamExternalResourceVerificationToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateIpamExternalResourceVerificationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateIpamExternalResourceVerificationTokenWithContext(ctx aws.Context, input *CreateIpamExternalResourceVerificationTokenInput, opts ...request.Option) (*CreateIpamExternalResourceVerificationTokenOutput, error) { + req, out := c.CreateIpamExternalResourceVerificationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateIpamPool = "CreateIpamPool" // CreateIpamPoolRequest generates a "aws/request.Request" representing the @@ -6551,13 +6621,13 @@ func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req // launch an instance using RunInstances, you can specify a launch template // instead of providing the launch parameters in the request. For more information, // see Launch an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // To clone an existing launch template as the basis for a new launch template, // use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning // a template. For more information, see Create a launch template from an existing // launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6644,7 +6714,7 @@ func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVers // // For more information, see Modify a launch template (manage launch template // versions) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7512,13 +7582,11 @@ func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) // Creates a network interface in the specified subnet. // // The number of IP addresses you can assign to a network interface varies by -// instance type. For more information, see IP Addresses Per ENI Per Instance -// Type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) -// in the Amazon Virtual Private Cloud User Guide. +// instance type. // // For more information about network interfaces, see Elastic network interfaces // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) in the -// Amazon Elastic Compute Cloud User Guide. +// Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7835,7 +7903,7 @@ func (c *EC2) CreateReplaceRootVolumeTaskRequest(input *CreateReplaceRootVolumeT // from an AMI that has the same key characteristics as that of the instance. // // For more information, see Replace a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/replace-root.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7929,7 +7997,7 @@ func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstanc // for purchase. To view the details of your Standard Reserved Instance listing, // you can use the DescribeReservedInstancesListings operation. 
// -// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// For more information, see Sell in the Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8381,9 +8449,9 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re // // You can tag your snapshots during creation. For more information, see Tag // your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // -// For more information, see Amazon Elastic Block Store (https://docs.aws.amazon.com/ebs/latest/userguide/what-is-ebs.html) +// For more information, see Amazon EBS (https://docs.aws.amazon.com/ebs/latest/userguide/what-is-ebs.html) // and Amazon EBS encryption (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption.html) // in the Amazon EBS User Guide. // @@ -8546,7 +8614,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // Creates a data feed for Spot Instances, enabling you to view Spot Instance // usage logs. You can create one data feed per Amazon Web Services account. // For more information, see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8799,9 +8867,8 @@ func (c *EC2) CreateSubnetCidrReservationRequest(input *CreateSubnetCidrReservat // // Creates a subnet CIDR reservation. For more information, see Subnet CIDR // reservations (https://docs.aws.amazon.com/vpc/latest/userguide/subnet-cidr-reservation.html) -// in the Amazon Virtual Private Cloud User Guide and Assign prefixes to network -// interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon VPC User Guide and Assign prefixes to network interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9461,7 +9528,7 @@ func (c *EC2) CreateTransitGatewayConnectPeerRequest(input *CreateTransitGateway // family (IPv4 or IPv6). // // For more information, see Connect peers (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-connect.html#tgw-connect-peer) -// in the Transit Gateways Guide. +// in the Amazon Web Services Transit Gateways Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10453,7 +10520,7 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // // You can tag your volumes during creation. For more information, see Tag your // Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. 
// // For more information, see Create an Amazon EBS volume (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-creating-volume.html) // in the Amazon EBS User Guide. @@ -10699,8 +10766,8 @@ func (c *EC2) CreateVpcEndpointConnectionNotificationRequest(input *CreateVpcEnd // Creates a connection notification for a specified VPC endpoint or VPC endpoint // service. A connection notification notifies you of specific endpoint events. // You must create an SNS topic to receive notifications. For more information, -// see Create a Topic (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) -// in the Amazon Simple Notification Service Developer Guide. +// see Creating an Amazon SNS topic (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) +// in the Amazon SNS Developer Guide. // // You can create a connection notification for interface endpoints only. // @@ -10871,8 +10938,8 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // overlapping CIDR blocks. // // Limitations and rules apply to a VPC peering connection. For more information, -// see the limitations (https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-limitations) -// section in the VPC Peering Guide. +// see the VPC peering limitations (https://docs.aws.amazon.com/vpc/latest/peering/vpc-peering-basics.html#vpc-peering-limitations) +// in the VPC Peering Guide. // // The owner of the accepter VPC must accept the peering request to activate // the peering connection. The VPC peering connection request expires after @@ -12306,6 +12373,83 @@ func (c *EC2) DeleteIpamWithContext(ctx aws.Context, input *DeleteIpamInput, opt return out, req.Send() } +const opDeleteIpamExternalResourceVerificationToken = "DeleteIpamExternalResourceVerificationToken" + +// DeleteIpamExternalResourceVerificationTokenRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIpamExternalResourceVerificationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteIpamExternalResourceVerificationToken for more information on using the DeleteIpamExternalResourceVerificationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteIpamExternalResourceVerificationTokenRequest method. 
+// req, resp := client.DeleteIpamExternalResourceVerificationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteIpamExternalResourceVerificationToken +func (c *EC2) DeleteIpamExternalResourceVerificationTokenRequest(input *DeleteIpamExternalResourceVerificationTokenInput) (req *request.Request, output *DeleteIpamExternalResourceVerificationTokenOutput) { + op := &request.Operation{ + Name: opDeleteIpamExternalResourceVerificationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIpamExternalResourceVerificationTokenInput{} + } + + output = &DeleteIpamExternalResourceVerificationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteIpamExternalResourceVerificationToken API operation for Amazon Elastic Compute Cloud. +// +// Delete a verification token. A verification token is an Amazon Web Services-generated +// random value that you can use to prove ownership of an external resource. +// For example, you can use a verification token to validate that you control +// a public IP address range when you bring an IP address range to Amazon Web +// Services (BYOIP). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteIpamExternalResourceVerificationToken for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteIpamExternalResourceVerificationToken +func (c *EC2) DeleteIpamExternalResourceVerificationToken(input *DeleteIpamExternalResourceVerificationTokenInput) (*DeleteIpamExternalResourceVerificationTokenOutput, error) { + req, out := c.DeleteIpamExternalResourceVerificationTokenRequest(input) + return out, req.Send() +} + +// DeleteIpamExternalResourceVerificationTokenWithContext is the same as DeleteIpamExternalResourceVerificationToken with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteIpamExternalResourceVerificationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteIpamExternalResourceVerificationTokenWithContext(ctx aws.Context, input *DeleteIpamExternalResourceVerificationTokenInput, opts ...request.Option) (*DeleteIpamExternalResourceVerificationTokenOutput, error) { + req, out := c.DeleteIpamExternalResourceVerificationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteIpamPool = "DeleteIpamPool" // DeleteIpamPoolRequest generates a "aws/request.Request" representing the @@ -12739,7 +12883,7 @@ func (c *EC2) DeleteLaunchTemplateVersionsRequest(input *DeleteLaunchTemplateVer // which deletes the launch template and all of its versions. // // For more information, see Delete a launch template version (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-launch-template-versions.html#delete-launch-template-version) -// in the EC2 User Guide. +// in the Amazon EC2 User Guide. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17517,7 +17661,7 @@ func (c *EC2) DescribeAddressTransfersRequest(input *DescribeAddressTransfersInp // // Describes an Elastic IP address transfer. For more information, see Transfer // Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // When you transfer an Elastic IP address, there is a two-step handshake between // the source and transfer Amazon Web Services accounts. When the source account @@ -17947,7 +18091,7 @@ func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesI // // For more information about Availability Zones, Local Zones, and Wavelength // Zones, see Regions and zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear @@ -18901,10 +19045,9 @@ func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInst // // This action is deprecated. // -// Describes one or more of your linked EC2-Classic instances. This request -// only returns information about EC2-Classic instances linked to a VPC through -// ClassicLink. You cannot use this request to return information about other -// instances. +// Describes your linked EC2-Classic instances. This request only returns information +// about EC2-Classic instances linked to a VPC through ClassicLink. You cannot +// use this request to return information about other instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -19969,9 +20112,12 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req * // DescribeDhcpOptions API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your DHCP options sets. +// Describes your DHCP option sets. The default is to describe all your DHCP +// option sets. Alternatively, you can specify specific DHCP option set IDs +// or filter the results to include only the DHCP option sets that match specific +// criteria. // -// For more information, see DHCP options sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) +// For more information, see DHCP option sets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -20102,7 +20248,10 @@ func (c *EC2) DescribeEgressOnlyInternetGatewaysRequest(input *DescribeEgressOnl // DescribeEgressOnlyInternetGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your egress-only internet gateways. +// Describes your egress-only internet gateways. The default is to describe +// all your egress-only internet gateways. Alternatively, you can specify specific +// egress-only internet gateway IDs or filter the results to include only the +// egress-only internet gateways that match specific criteria. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20228,11 +20377,9 @@ func (c *EC2) DescribeElasticGpusRequest(input *DescribeElasticGpusInput) (req * // // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads // that require graphics acceleration, we recommend that you use Amazon EC2 -// G4ad, G4dn, or G5 instances. +// G4, G5, or G6 instances. // // Describes the Elastic Graphics accelerator associated with your instances. -// For more information about Elastic Graphics, see Amazon Elastic Graphics -// (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -23902,7 +24049,10 @@ func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInp // DescribeInternetGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your internet gateways. +// Describes your internet gateways. The default is to describe all your internet +// gateways. Alternatively, you can specify specific internet gateway IDs or +// filter the results to include only the internet gateways that match specific +// criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -24059,6 +24209,83 @@ func (c *EC2) DescribeIpamByoasnWithContext(ctx aws.Context, input *DescribeIpam return out, req.Send() } +const opDescribeIpamExternalResourceVerificationTokens = "DescribeIpamExternalResourceVerificationTokens" + +// DescribeIpamExternalResourceVerificationTokensRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIpamExternalResourceVerificationTokens operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIpamExternalResourceVerificationTokens for more information on using the DescribeIpamExternalResourceVerificationTokens +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeIpamExternalResourceVerificationTokensRequest method. 
+// req, resp := client.DescribeIpamExternalResourceVerificationTokensRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamExternalResourceVerificationTokens +func (c *EC2) DescribeIpamExternalResourceVerificationTokensRequest(input *DescribeIpamExternalResourceVerificationTokensInput) (req *request.Request, output *DescribeIpamExternalResourceVerificationTokensOutput) { + op := &request.Operation{ + Name: opDescribeIpamExternalResourceVerificationTokens, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIpamExternalResourceVerificationTokensInput{} + } + + output = &DescribeIpamExternalResourceVerificationTokensOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIpamExternalResourceVerificationTokens API operation for Amazon Elastic Compute Cloud. +// +// Describe verification tokens. A verification token is an Amazon Web Services-generated +// random value that you can use to prove ownership of an external resource. +// For example, you can use a verification token to validate that you control +// a public IP address range when you bring an IP address range to Amazon Web +// Services (BYOIP). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIpamExternalResourceVerificationTokens for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpamExternalResourceVerificationTokens +func (c *EC2) DescribeIpamExternalResourceVerificationTokens(input *DescribeIpamExternalResourceVerificationTokensInput) (*DescribeIpamExternalResourceVerificationTokensOutput, error) { + req, out := c.DescribeIpamExternalResourceVerificationTokensRequest(input) + return out, req.Send() +} + +// DescribeIpamExternalResourceVerificationTokensWithContext is the same as DescribeIpamExternalResourceVerificationTokens with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIpamExternalResourceVerificationTokens for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIpamExternalResourceVerificationTokensWithContext(ctx aws.Context, input *DescribeIpamExternalResourceVerificationTokensInput, opts ...request.Option) (*DescribeIpamExternalResourceVerificationTokensOutput, error) { + req, out := c.DescribeIpamExternalResourceVerificationTokensRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeIpamPools = "DescribeIpamPools" // DescribeIpamPoolsRequest generates a "aws/request.Request" representing the @@ -26489,7 +26716,9 @@ func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req * // DescribeNatGateways API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your NAT gateways. +// Describes your NAT gateways. The default is to describe all your NAT gateways. 
+// Alternatively, you can specify specific NAT gateway IDs or filter the results +// to include only the NAT gateways that match specific criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -26619,7 +26848,9 @@ func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req * // DescribeNetworkAcls API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your network ACLs. +// Describes your network ACLs. The default is to describe all your network +// ACLs. Alternatively, you can specify specific network ACL IDs or filter the +// results to include only the network ACLs that match specific criteria. // // For more information, see Network ACLs (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html) // in the Amazon VPC User Guide. @@ -27608,6 +27839,12 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput // DescribePlacementGroups API operation for Amazon Elastic Compute Cloud. // // Describes the specified placement groups or all of your placement groups. +// +// To describe a specific placement group that is shared with your account, +// you must specify the ID of the placement group using the GroupId parameter. +// Specifying the name of a shared placement group using the GroupNames parameter +// will result in an error. +// // For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon EC2 User Guide. // @@ -28092,12 +28329,12 @@ func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request. // // Describes the Regions that are enabled for your account, or all Regions. // -// For a list of the Regions supported by Amazon EC2, see Amazon Elastic Compute -// Cloud endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/ec2-service.html). +// For a list of the Regions supported by Amazon EC2, see Amazon EC2 service +// endpoints (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-endpoints.html). // // For information about enabling and disabling Regions for your account, see -// Managing Amazon Web Services Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the Amazon Web Services General Reference. +// Specify which Amazon Web Services Regions your account can use (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html) +// in the Amazon Web Services Account Management Reference Guide. // // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear @@ -28182,7 +28419,7 @@ func (c *EC2) DescribeReplaceRootVolumeTasksRequest(input *DescribeReplaceRootVo // // Describes a root volume replacement task. For more information, see Replace // a root volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/replace-root.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -28406,7 +28643,7 @@ func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedIn // demand is met. You are charged based on the total price of all of the listings // that you purchase. 
// -// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// For more information, see Sell in the Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // // The order of the elements in the response, including those within nested @@ -28495,7 +28732,7 @@ func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReser // requests is returned. If a modification ID is specified, only information // about the specific modification is returned. // -// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// For more information, see Modify Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon EC2 User Guide. // // The order of the elements in the response, including those within nested @@ -28640,7 +28877,7 @@ func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedI // Marketplace, they will be excluded from these results. This is to ensure // that you do not purchase your own Reserved Instances. // -// For more information, see Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// For more information, see Sell in the Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // // The order of the elements in the response, including those within nested @@ -28775,7 +29012,9 @@ func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req * // DescribeRouteTables API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your route tables. +// Describes your route tables. The default is to describe all your route tables. +// Alternatively, you can specify specific route table IDs or filter the results +// to include only the route tables that match specific criteria. // // Each subnet in your VPC must be associated with a route table. If a subnet // is not explicitly associated with any route table, it is implicitly associated @@ -29898,7 +30137,7 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // // Describes the data feed for Spot Instances. For more information, see Spot // Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -30412,7 +30651,7 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // // Describes the Spot price history. For more information, see Spot Instance // pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // When you specify a start and end time, the operation returns the prices of // the instance types within that time range. It also returns the last price @@ -30826,7 +31065,9 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request. // DescribeSubnets API operation for Amazon Elastic Compute Cloud. 
// -// Describes one or more of your subnets. +// Describes your subnets. The default is to describe all your subnets. Alternatively, +// you can specify specific subnet IDs or filter the results to include only +// the subnets that match specific criteria. // // For more information, see Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html) // in the Amazon VPC User Guide. @@ -31050,6 +31291,79 @@ func (c *EC2) DescribeTagsPagesWithContext(ctx aws.Context, input *DescribeTagsI return p.Err() } +const opDescribeTrafficMirrorFilterRules = "DescribeTrafficMirrorFilterRules" + +// DescribeTrafficMirrorFilterRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrafficMirrorFilterRules operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTrafficMirrorFilterRules for more information on using the DescribeTrafficMirrorFilterRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeTrafficMirrorFilterRulesRequest method. +// req, resp := client.DescribeTrafficMirrorFilterRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilterRules +func (c *EC2) DescribeTrafficMirrorFilterRulesRequest(input *DescribeTrafficMirrorFilterRulesInput) (req *request.Request, output *DescribeTrafficMirrorFilterRulesOutput) { + op := &request.Operation{ + Name: opDescribeTrafficMirrorFilterRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTrafficMirrorFilterRulesInput{} + } + + output = &DescribeTrafficMirrorFilterRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTrafficMirrorFilterRules API operation for Amazon Elastic Compute Cloud. +// +// Describe traffic mirror filters that determine the traffic that is mirrored. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeTrafficMirrorFilterRules for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTrafficMirrorFilterRules +func (c *EC2) DescribeTrafficMirrorFilterRules(input *DescribeTrafficMirrorFilterRulesInput) (*DescribeTrafficMirrorFilterRulesOutput, error) { + req, out := c.DescribeTrafficMirrorFilterRulesRequest(input) + return out, req.Send() +} + +// DescribeTrafficMirrorFilterRulesWithContext is the same as DescribeTrafficMirrorFilterRules with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTrafficMirrorFilterRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTrafficMirrorFilterRulesWithContext(ctx aws.Context, input *DescribeTrafficMirrorFilterRulesInput, opts ...request.Option) (*DescribeTrafficMirrorFilterRulesOutput, error) { + req, out := c.DescribeTrafficMirrorFilterRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeTrafficMirrorFilters = "DescribeTrafficMirrorFilters" // DescribeTrafficMirrorFiltersRequest generates a "aws/request.Request" representing the @@ -33969,13 +34283,6 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica // Describes the most recent volume modification request for the specified EBS // volumes. // -// If a volume has never been modified, some information in the output will -// be null. If a volume has been modified more than once, the output includes -// only the most recent modification request. -// -// You can also use CloudWatch Events to check the status of a modification -// to an EBS volume. For information about CloudWatch Events, see the Amazon -// CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). // For more information, see Monitor the progress of volume modifications (https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-modifications.html) // in the Amazon EBS User Guide. // @@ -34995,7 +35302,9 @@ func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req // DescribeVpcEndpoints API operation for Amazon Elastic Compute Cloud. // -// Describes your VPC endpoints. +// Describes your VPC endpoints. The default is to describe all your VPC endpoints. +// Alternatively, you can specify specific VPC endpoint IDs or filter the results +// to include only the VPC endpoints that match specific criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -35125,7 +35434,10 @@ func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConn // DescribeVpcPeeringConnections API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your VPC peering connections. +// Describes your VPC peering connections. The default is to describe all your +// VPC peering connections. Alternatively, you can specify specific VPC peering +// connection IDs or filter the results to include only the VPC peering connections +// that match specific criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -35255,7 +35567,9 @@ func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Reques // DescribeVpcs API operation for Amazon Elastic Compute Cloud. // -// Describes one or more of your VPCs. +// Describes your VPCs. The default is to describe all your VPCs. Alternatively, +// you can specify specific VPC IDs or filter the results to include only the +// VPCs that match specific criteria. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -36005,7 +36319,7 @@ func (c *EC2) DisableAddressTransferRequest(input *DisableAddressTransferInput) // // Disables Elastic IP address transfer. 
For more information, see Transfer // Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -38410,7 +38724,7 @@ func (c *EC2) EnableAddressTransferRequest(input *EnableAddressTransferInput) (r // // Enables Elastic IP address transfer. For more information, see Transfer Elastic // IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -40060,8 +40374,8 @@ func (c *EC2) ExportTransitGatewayRoutesRequest(input *ExportTransitGatewayRoute // by CIDR range. // // The routes are saved to the specified bucket in a JSON file. For more information, -// see Export Route Tables to Amazon S3 (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-route-tables.html#tgw-export-route-tables) -// in Transit Gateways. +// see Export route tables to Amazon S3 (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-route-tables.html#tgw-export-route-tables) +// in the Amazon Web Services Transit Gateways Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -40628,15 +40942,6 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // instances, the instance console output includes the last three system event // log errors. // -// By default, the console output returns buffered information that was posted -// shortly after an instance transition state (start, stop, reboot, or terminate). -// This information is available for at least one hour after the most recent -// post. Only the most recent 64 KB of console output is available. -// -// You can optionally retrieve the latest serial console output at any time -// during the instance lifecycle. This option is supported on instance types -// that use the Nitro hypervisor. -// // For more information, see Instance console output (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html#instance-console-console-output) // in the Amazon EC2 User Guide. // @@ -42262,7 +42567,7 @@ func (c *EC2) GetIpamPoolAllocationsRequest(input *GetIpamPoolAllocationsInput) // // If you use this action after AllocateIpamPoolCidr (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AllocateIpamPoolCidr.html) // or ReleaseIpamPoolAllocation (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ReleaseIpamPoolAllocation.html), -// note that all EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency) +// note that all EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/ec2/latest/devguide/eventual-consistency.html) // model. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -43199,8 +43504,8 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request. 
// // The Windows password is generated at boot by the EC2Config service or EC2Launch // scripts (Windows Server 2016 and later). This usually only happens the first -// time an instance is launched. For more information, see EC2Config (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html) -// and EC2Launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html) +// time an instance is launched. For more information, see EC2Config (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UsingConfig_WinAMI.html) +// and EC2Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2launch.html) // in the Amazon EC2 User Guide. // // For the EC2Config service, the password is not generated for rebundled AMIs @@ -46066,11 +46371,7 @@ func (c *EC2) ModifyAvailabilityZoneGroupRequest(input *ModifyAvailabilityZoneGr // ModifyAvailabilityZoneGroup API operation for Amazon Elastic Compute Cloud. // -// Changes the opt-in status of the Local Zone and Wavelength Zone group for -// your account. -// -// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) -// to view the value for GroupName. +// Changes the opt-in status of the specified zone group for your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -48568,7 +48869,7 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // must be identical, except for Availability Zone, network platform, and instance // type. // -// For more information, see Modifying Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) +// For more information, see Modify Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -50047,12 +50348,8 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques // volume's file-system size to take advantage of the new storage capacity. // For more information, see Extend the file system (https://docs.aws.amazon.com/ebs/latest/userguide/recognize-expanded-volume-linux.html). // -// You can use CloudWatch Events to check the status of a modification to an -// EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch -// Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). -// You can also track the status of a modification using DescribeVolumesModifications. -// For information about tracking status changes using either method, see Monitor -// the progress of volume modifications (https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-modifications.html). +// For more information, see Monitor the progress of volume modifications (https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-modifications.html) +// in the Amazon EBS User Guide. // // With previous-generation instance types, resizing an EBS volume might require // detaching and reattaching the volume or stopping and restarting the instance. @@ -51430,7 +51727,7 @@ func (c *EC2) ProvisionByoipCidrRequest(input *ProvisionByoipCidrInput) (req *re // you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 // to advertise the address range. 
For more information, see Bring your own // IP addresses (BYOIP) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. // // Provisioning an address range is an asynchronous operation, so the call returns // immediately, but the address range is not ready to use until its status changes @@ -51903,7 +52200,7 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn // If you do not specify a purchase time, the default is the current time. // // For more information, see Reserved Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) -// and Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) +// and Sell in the Reserved Instance Marketplace (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -52348,9 +52645,9 @@ func (c *EC2) RegisterTransitGatewayMulticastGroupMembersRequest(input *Register // // Registers members (network interfaces) with the transit gateway multicast // group. A member is a network interface associated with a supported EC2 instance -// that receives multicast traffic. For information about supported instances, -// see Multicast Consideration (https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits) -// in Amazon VPC Transit Gateways. +// that receives multicast traffic. For more information, see Multicast on transit +// gateways (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-multicast-overview.html) +// in the Amazon Web Services Transit Gateways Guide. // // After you add the members, use SearchTransitGatewayMulticastGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html) // to verify that the members were added to the transit gateway multicast group. @@ -52430,9 +52727,9 @@ func (c *EC2) RegisterTransitGatewayMulticastGroupSourcesRequest(input *Register // multicast group. // // A multicast source is a network interface attached to a supported instance -// that sends multicast traffic. For information about supported instances, -// see Multicast Considerations (https://docs.aws.amazon.com/vpc/latest/tgw/transit-gateway-limits.html#multicast-limits) -// in Amazon VPC Transit Gateways. +// that sends multicast traffic. For more information about supported instances, +// see Multicast on transit gateways (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-multicast-overview.html) +// in the Amazon Web Services Transit Gateways Guide. // // After you add the source, use SearchTransitGatewayMulticastGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayMulticastGroups.html) // to verify that the source was added to the multicast group. @@ -53065,7 +53362,7 @@ func (c *EC2) ReleaseIpamPoolAllocationRequest(input *ReleaseIpamPoolAllocationI // For more information, see Release an allocation (https://docs.aws.amazon.com/vpc/latest/ipam/release-alloc-ipam.html) // in the Amazon VPC IPAM User Guide. 
// -// All EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/query-api-troubleshooting.html#eventual-consistency) +// All EC2 API actions follow an eventual consistency (https://docs.aws.amazon.com/ec2/latest/devguide/eventual-consistency.html) // model. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -53863,13 +54160,13 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // // Creates a Spot Instance request. // -// For more information, see Spot Instance requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) -// in the Amazon EC2 User Guide for Linux Instances. +// For more information, see Work with Spot Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// in the Amazon EC2 User Guide. // // We strongly discourage using the RequestSpotInstances API because it is a // legacy API with no planned investment. For options for requesting Spot Instances, // see Which is the best Spot request method to use? (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-best-practices.html#which-spot-request-method-to-use) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -54252,7 +54549,7 @@ func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) // The sourceDestCheck attribute controls whether source/destination checking // is enabled. The default value is true, which means checking is enabled. This // value must be false for a NAT instance to perform NAT. For more information, -// see NAT Instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) +// see NAT instances (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -55141,7 +55438,7 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // // - If you don't specify a security group ID, we use the default security -// group. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). +// group for the VPC. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). // // - If any of the AMIs have a product code attached for which the user has // not subscribed, the request fails. @@ -55155,6 +55452,9 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // batches. For example, create five separate launch requests for 100 instances // each instead of one launch request for 500 instances. // +// RunInstances is subject to both request rate limiting and resource rate limiting. +// For more information, see Request throttling (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-throttling.html). +// // An instance is ready for you to use when it's in the running state. You can // check the state of your instance using DescribeInstances. You can tag instances // and EBS volumes during launch, after launch, or both. 
For more information, @@ -55248,9 +55548,7 @@ func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (r // You must launch a Scheduled Instance during its scheduled time period. You // can't stop or reboot a Scheduled Instance, but you can terminate it as needed. // If you terminate a Scheduled Instance before the current scheduled time period -// ends, you can launch it again after a few minutes. For more information, -// see Scheduled Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-scheduled-instances.html) -// in the Amazon EC2 User Guide. +// ends, you can launch it again after a few minutes. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -55674,8 +55972,7 @@ func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput // For more information about configuring your operating system to generate // a crash dump when a kernel panic or stop error occurs, see Send a diagnostic // interrupt (for advanced users) (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) -// (Linux instances) or Send a diagnostic interrupt (for advanced users) (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) -// (Windows instances). +// in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -55770,7 +56067,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // not supported on Dedicated Hosts. Before you start the instance, either change // its CPU credit option to standard, or change its tenancy to default or dedicated. // -// For more information, see Stop and start your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// For more information, see Stop and start Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -56072,13 +56369,13 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // StopInstances API operation for Amazon Elastic Compute Cloud. // // Stops an Amazon EBS-backed instance. For more information, see Stop and start -// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) // in the Amazon EC2 User Guide. // // You can use the Stop action to hibernate an instance if the instance is enabled // for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enabling-hibernation.html) // and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). -// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. // // We don't charge usage for a stopped instance, or data transfer fees; however, @@ -58570,7 +58867,7 @@ func (s *AddressAttribute) SetPublicIp(v string) *AddressAttribute { // Details on the Elastic IP address transfer. 
For more information, see Transfer // Elastic IP addresses (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-eips.html#transfer-EIPs-intro) -// in the Amazon Virtual Private Cloud User Guide. +// in the Amazon VPC User Guide. type AddressTransfer struct { _ struct{} `type:"structure"` @@ -58798,9 +59095,6 @@ type AllocateAddressInput struct { // which Amazon Web Services advertises IP addresses. Use this parameter to // limit the IP address to this location. IP addresses cannot move between network // border groups. - // - // Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) - // to view the network border groups. NetworkBorderGroup *string `type:"string"` // The ID of an address pool that you own. Use this parameter to let Amazon @@ -58990,7 +59284,7 @@ type AllocateHostsInput struct { // see Understanding auto-placement and affinity (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-dedicated-hosts-work.html#dedicated-hosts-understanding) // in the Amazon EC2 User Guide. // - // Default: on + // Default: off AutoPlacement *string `locationName:"autoPlacement" type:"string" enum:"AutoPlacement"` // The Availability Zone in which to allocate the Dedicated Host. @@ -59202,7 +59496,7 @@ type AllocateIpamPoolCidrInput struct { Cidr *string `type:"string"` // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the allocation. @@ -60822,7 +61116,7 @@ type AssociateClientVpnTargetNetworkInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The ID of the Client VPN endpoint. @@ -62263,7 +62557,7 @@ type AssociateTrunkInterfaceInput struct { BranchInterfaceId *string `type:"string" required:"true"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -62358,7 +62652,7 @@ type AssociateTrunkInterfaceOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). 
ClientToken *string `locationName:"clientToken" type:"string"` // Information about the association between the trunk network interface and @@ -63180,8 +63474,8 @@ type AttachVerifiedAccessTrustProviderInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -63738,7 +64032,7 @@ type AuthorizeClientVpnIngressInput struct { AuthorizeAllGroups *bool `type:"boolean"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The ID of the Client VPN endpoint. @@ -64981,7 +65275,30 @@ type ByoipCidr struct { // this time. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` - // The state of the address pool. + // The state of the address range. + // + // * advertised: The address range is being advertised to the internet by + // Amazon Web Services. + // + // * deprovisioned: The address range is deprovisioned. + // + // * failed-deprovision: The request to deprovision the address range was + // unsuccessful. Ensure that all EIPs from the range have been deallocated + // and try again. + // + // * failed-provision: The request to provision the address range was unsuccessful. + // + // * pending-deprovision: You’ve submitted a request to deprovision an + // address range and it's pending. + // + // * pending-provision: You’ve submitted a request to provision an address + // range and it's pending. + // + // * provisioned: The address range is provisioned and can be advertised. + // The range is not currently advertised. + // + // * provisioned-not-publicly-advertisable: The address range is provisioned + // and cannot be advertised. State *string `locationName:"state" type:"string" enum:"ByoipCidrState"` // Upon success, contains the ID of the address pool. Otherwise, contains an @@ -67339,7 +67656,7 @@ func (s *CertificateAuthenticationRequest) SetClientRootCertificateChainArn(v st // Provides authorization for Amazon to bring a specific IP address range to // a specific Amazon Web Services account using bring your own IP addresses // (BYOIP). For more information, see Configuring your BYOIP address range (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#prepare-for-byoip) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type CidrAuthorizationContext struct { _ struct{} `type:"structure"` @@ -69327,7 +69644,7 @@ func (s *ConnectionNotification) SetVpcEndpointId(v string) *ConnectionNotificat // A security group connection tracking configuration that enables you to set // the idle timeout for connection tracking on an Elastic network interface. 
// For more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type ConnectionTrackingConfiguration struct { _ struct{} `type:"structure"` @@ -69386,7 +69703,7 @@ func (s *ConnectionTrackingConfiguration) SetUdpTimeout(v int64) *ConnectionTrac // A security group connection tracking specification that enables you to set // the idle timeout for connection tracking on an Elastic network interface. // For more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type ConnectionTrackingSpecification struct { _ struct{} `type:"structure"` @@ -69445,7 +69762,7 @@ func (s *ConnectionTrackingSpecification) SetUdpTimeout(v int64) *ConnectionTrac // A security group connection tracking specification request that enables you // to set the idle timeout for connection tracking on an Elastic network interface. // For more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type ConnectionTrackingSpecificationRequest struct { _ struct{} `type:"structure"` @@ -69504,7 +69821,7 @@ func (s *ConnectionTrackingSpecificationRequest) SetUdpTimeout(v int64) *Connect // A security group connection tracking specification response that enables // you to set the idle timeout for connection tracking on an Elastic network // interface. For more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type ConnectionTrackingSpecificationResponse struct { _ struct{} `type:"structure"` @@ -69653,7 +69970,7 @@ type CopyFpgaImageInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // The description for the new AFI. @@ -70060,9 +70377,9 @@ type CopySnapshotInput struct { // in the Amazon EBS User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The identifier of the Key Management Service (KMS) KMS key to use for Amazon - // EBS encryption. If this parameter is not specified, your KMS key for Amazon - // EBS is used. If KmsKeyId is specified, the encrypted state must be true. + // The identifier of the KMS key to use for Amazon EBS encryption. If this parameter + // is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + // the encrypted state must be true. 
// // You can specify the KMS key using any of the following: // @@ -70090,9 +70407,9 @@ type CopySnapshotInput struct { // for this parameter uses the same logic that is described in Authenticating // Requests: Using Query Parameters (Amazon Web Services Signature Version 4) // (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - // in the Amazon Simple Storage Service API Reference. An invalid or improperly - // signed PresignedUrl will cause the copy operation to fail asynchronously, - // and the snapshot will move to an error state. + // in the Amazon S3 API Reference. An invalid or improperly signed PresignedUrl + // will cause the copy operation to fail asynchronously, and the snapshot will + // move to an error state. // // PresignedUrl is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CopySnapshotInput's @@ -70927,7 +71244,7 @@ type CreateCarrierGatewayInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -71057,7 +71374,7 @@ type CreateClientVpnEndpointInput struct { ClientLoginBannerOptions *ClientLoginBannerOptions `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Information about the client connection logging options. @@ -71343,7 +71660,7 @@ type CreateClientVpnRouteInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The ID of the Client VPN endpoint to which to add the route. @@ -71692,11 +72009,22 @@ func (s *CreateCoipPoolOutput) SetCoipPool(v *CoipPool) *CreateCoipPoolOutput { type CreateCustomerGatewayInput struct { _ struct{} `type:"structure"` - // For devices that support BGP, the customer gateway's BGP ASN. + // For customer gateway devices that support BGP, specify the device's ASN. + // You must specify either BgpAsn or BgpAsnExtended when creating the customer + // gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended. // // Default: 65000 + // + // Valid values: 1 to 2,147,483,647 BgpAsn *int64 `type:"integer"` + // For customer gateway devices that support BGP, specify the device's ASN. + // You must specify either BgpAsn or BgpAsnExtended when creating the customer + // gateway. 
If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended. + // + // Valid values: 2,147,483,648 to 4,294,967,295 + BgpAsnExtended *int64 `type:"long"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. CertificateArn *string `type:"string"` @@ -71712,7 +72040,9 @@ type CreateCustomerGatewayInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // IPv4 address for the customer gateway device's outside interface. The address - // must be static. + // must be static. If OutsideIpAddressType in your VPN connection options is + // set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. + // If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address. IpAddress *string `type:"string"` // This member has been deprecated. The Internet-routable IP address for the @@ -71765,6 +72095,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn return s } +// SetBgpAsnExtended sets the BgpAsnExtended field's value. +func (s *CreateCustomerGatewayInput) SetBgpAsnExtended(v int64) *CreateCustomerGatewayInput { + s.BgpAsnExtended = &v + return s +} + // SetCertificateArn sets the CertificateArn field's value. func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { s.CertificateArn = &v @@ -72106,7 +72442,7 @@ type CreateEgressOnlyInternetGatewayInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -72632,7 +72968,7 @@ type CreateFlowLogsInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // The ARN of the IAM role that allows Amazon EC2 to publish flow logs across @@ -72698,7 +73034,7 @@ type CreateFlowLogsInput struct { // minute) or 600 seconds (10 minutes). This parameter must be 60 seconds for // transit gateway resource types. // - // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), + // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html), // the aggregation interval is always 60 seconds or less, regardless of the // value that you specify. // @@ -72900,7 +73236,7 @@ type CreateFpgaImageInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. 
For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // A description for the AFI. @@ -73237,15 +73573,14 @@ type CreateInstanceConnectEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // Indicates whether your client's IP address is preserved as the source. The - // value is true or false. + // Indicates whether the client IP address is preserved as the source. The following + // are the possible values. // - // * If true, your client's IP address is used when you connect to a resource. + // * true - Use the client IP address as the source. // - // * If false, the elastic network interface IP address is used when you - // connect to a resource. + // * false - Use the network interface IP address as the source. // - // Default: true + // Default: false PreserveClientIp *bool `type:"boolean"` // One or more security groups to associate with the endpoint. If you don't @@ -73691,11 +74026,119 @@ func (s *CreateInternetGatewayOutput) SetInternetGateway(v *InternetGateway) *Cr return s } +type CreateIpamExternalResourceVerificationTokenInput struct { + _ struct{} `type:"structure"` + + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A check for whether you have the required permissions for the action without + // actually making the request and provides an error response. If you have the + // required permissions, the error response is DryRunOperation. Otherwise, it + // is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the IPAM that will create the token. + // + // IpamId is a required field + IpamId *string `type:"string" required:"true"` + + // Token tags. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateIpamExternalResourceVerificationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateIpamExternalResourceVerificationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateIpamExternalResourceVerificationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIpamExternalResourceVerificationTokenInput"} + if s.IpamId == nil { + invalidParams.Add(request.NewErrParamRequired("IpamId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateIpamExternalResourceVerificationTokenInput) SetClientToken(v string) *CreateIpamExternalResourceVerificationTokenInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *CreateIpamExternalResourceVerificationTokenInput) SetDryRun(v bool) *CreateIpamExternalResourceVerificationTokenInput { + s.DryRun = &v + return s +} + +// SetIpamId sets the IpamId field's value. +func (s *CreateIpamExternalResourceVerificationTokenInput) SetIpamId(v string) *CreateIpamExternalResourceVerificationTokenInput { + s.IpamId = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateIpamExternalResourceVerificationTokenInput) SetTagSpecifications(v []*TagSpecification) *CreateIpamExternalResourceVerificationTokenInput { + s.TagSpecifications = v + return s +} + +type CreateIpamExternalResourceVerificationTokenOutput struct { + _ struct{} `type:"structure"` + + // The verification token. + IpamExternalResourceVerificationToken *IpamExternalResourceVerificationToken `locationName:"ipamExternalResourceVerificationToken" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateIpamExternalResourceVerificationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateIpamExternalResourceVerificationTokenOutput) GoString() string { + return s.String() +} + +// SetIpamExternalResourceVerificationToken sets the IpamExternalResourceVerificationToken field's value. +func (s *CreateIpamExternalResourceVerificationTokenOutput) SetIpamExternalResourceVerificationToken(v *IpamExternalResourceVerificationToken) *CreateIpamExternalResourceVerificationTokenOutput { + s.IpamExternalResourceVerificationToken = v + return s +} + type CreateIpamInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the IPAM. @@ -73865,7 +74308,7 @@ type CreateIpamPoolInput struct { AwsService *string `type:"string" enum:"IpamPoolAwsService"` // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the IPAM pool. @@ -73882,15 +74325,21 @@ type CreateIpamPoolInput struct { // IpamScopeId is a required field IpamScopeId *string `type:"string" required:"true"` - // In IPAM, the locale is the Amazon Web Services Region where you want to make - // an IPAM pool available for allocations. Only resources in the same Region - // as the locale of the pool can get IP address allocations from the pool. 
You - // can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares - // a locale with the VPC’s Region. Note that once you choose a Locale for - // a pool, you cannot modify it. If you do not choose a locale, resources in - // Regions others than the IPAM's home region cannot use CIDRs from this pool. + // The locale for the pool should be one of the following: // - // Possible values: Any Amazon Web Services Region, such as us-east-1. + // * An Amazon Web Services Region where you want this IPAM pool to be available + // for allocations. + // + // * The network border group for an Amazon Web Services Local Zone where + // you want this IPAM pool to be available for allocations (supported Local + // Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail)). + // This option is only available for IPAM IPv4 pools in the public scope. + // + // If you do not choose a locale, resources in Regions others than the IPAM's + // home region cannot use CIDRs from this pool. + // + // Possible values: Any Amazon Web Services Region or supported Amazon Web Services + // Local Zone. Locale *string `type:"string"` // The IP address source for pools in the public scope. Only used for provisioning @@ -74196,7 +74645,7 @@ type CreateIpamScopeInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the scope you're creating. @@ -74673,7 +75122,7 @@ type CreateLaunchTemplateVersionInput struct { // If true, and if a Systems Manager parameter is specified for ImageId, the // AMI ID is displayed in the response for imageID. For more information, see // Use a Systems Manager parameter instead of an AMI ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. // // Default: false ResolveAlias *bool `type:"boolean"` @@ -75295,7 +75744,7 @@ type CreateManagedPrefixListInput struct { AddressFamily *string `type:"string" required:"true"` // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). // // Constraints: Up to 255 UTF-8 characters in length. ClientToken *string `type:"string" idempotencyToken:"true"` @@ -75455,7 +75904,7 @@ type CreateNatGatewayInput struct { AllocationId *string `type:"string"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). // // Constraint: Maximum 64 ASCII characters. 
ClientToken *string `type:"string" idempotencyToken:"true"` @@ -75825,7 +76274,7 @@ type CreateNetworkAclInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -75943,7 +76392,7 @@ type CreateNetworkInsightsAccessScopeInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -76054,7 +76503,7 @@ type CreateNetworkInsightsPathInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The ID or ARN of the destination. If the resource is in another account, @@ -76236,7 +76685,7 @@ type CreateNetworkInterfaceInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A connection tracking specification for the network interface. @@ -76783,6 +77232,13 @@ type CreatePublicIpv4PoolInput struct { // is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The Availability Zone (AZ) or Local Zone (LZ) network border group that the + // resource that the IP address is assigned to is in. Defaults to an AZ network + // border group. For more information on available Local Zones, see Local Zone + // availability (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) + // in the Amazon EC2 User Guide. + NetworkBorderGroup *string `type:"string"` + // The key/value combination of a tag assigned to the resource. Use the tag // key in the filter name and the tag value as the filter value. For example, // to find all resources that have a tag with the key Owner and the value TeamA, @@ -76814,6 +77270,12 @@ func (s *CreatePublicIpv4PoolInput) SetDryRun(v bool) *CreatePublicIpv4PoolInput return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. 
+func (s *CreatePublicIpv4PoolInput) SetNetworkBorderGroup(v string) *CreatePublicIpv4PoolInput { + s.NetworkBorderGroup = &v + return s +} + // SetTagSpecifications sets the TagSpecifications field's value. func (s *CreatePublicIpv4PoolInput) SetTagSpecifications(v []*TagSpecification) *CreatePublicIpv4PoolInput { s.TagSpecifications = v @@ -76857,7 +77319,7 @@ type CreateReplaceRootVolumeTaskInput struct { // Unique, case-sensitive identifier you provide to ensure the idempotency of // the request. If you do not specify a client token, a randomly generated token // is used for the request to ensure idempotency. For more information, see - // Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Indicates whether to automatically delete the original root volume after @@ -77486,7 +77948,7 @@ type CreateRouteTableInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -78355,7 +78817,7 @@ type CreateSubnetInput struct { // // To create a subnet in a Local Zone, set this value to the Local Zone ID, // for example us-west-2-lax-1a. For information about the Regions that support - // Local Zones, see Local Zones locations (http://aws.amazon.com/about-aws/global-infrastructure/localzones/locations/). + // Local Zones, see Available Local Zones (https://docs.aws.amazon.com/local-zones/latest/ug/available-local-zones.html). // // To create a subnet in an Outpost, set this value to the Availability Zone // for the Outpost and specify the Outpost ARN. @@ -78652,7 +79114,7 @@ type CreateTrafficMirrorFilterInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The description of the Traffic Mirror filter. @@ -78714,7 +79176,7 @@ type CreateTrafficMirrorFilterOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // Information about the Traffic Mirror filter. 
@@ -78755,7 +79217,7 @@ type CreateTrafficMirrorFilterRuleInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The description of the Traffic Mirror rule. @@ -78801,6 +79263,9 @@ type CreateTrafficMirrorFilterRuleInput struct { // The source port range. SourcePortRange *TrafficMirrorPortRangeRequest `type:"structure"` + // Traffic Mirroring tags specifications. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of traffic. // // TrafficDirection is a required field @@ -78918,6 +79383,12 @@ func (s *CreateTrafficMirrorFilterRuleInput) SetSourcePortRange(v *TrafficMirror return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateTrafficMirrorFilterRuleInput) SetTagSpecifications(v []*TagSpecification) *CreateTrafficMirrorFilterRuleInput { + s.TagSpecifications = v + return s +} + // SetTrafficDirection sets the TrafficDirection field's value. func (s *CreateTrafficMirrorFilterRuleInput) SetTrafficDirection(v string) *CreateTrafficMirrorFilterRuleInput { s.TrafficDirection = &v @@ -78934,7 +79405,7 @@ type CreateTrafficMirrorFilterRuleOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // The Traffic Mirror rule. @@ -78975,7 +79446,7 @@ type CreateTrafficMirrorSessionInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The description of the Traffic Mirror session. @@ -79029,9 +79500,9 @@ type CreateTrafficMirrorSessionInput struct { TrafficMirrorTargetId *string `type:"string" required:"true"` // The VXLAN ID for the Traffic Mirror session. For more information about the - // VXLAN protocol, see RFC 7348 (https://tools.ietf.org/html/rfc7348). If you - // do not specify a VirtualNetworkId, an account-wide unique id is chosen at - // random. + // VXLAN protocol, see RFC 7348 (https://datatracker.ietf.org/doc/html/rfc7348). + // If you do not specify a VirtualNetworkId, an account-wide unique ID is chosen + // at random. VirtualNetworkId *int64 `type:"integer"` } @@ -79139,7 +79610,7 @@ type CreateTrafficMirrorSessionOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. 
For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // Information about the Traffic Mirror session. @@ -79180,7 +79651,7 @@ type CreateTrafficMirrorTargetInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // The description of the Traffic Mirror target. @@ -79270,7 +79741,7 @@ type CreateTrafficMirrorTargetOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // Information about the Traffic Mirror target. @@ -80900,8 +81371,8 @@ type CreateVerifiedAccessEndpointInput struct { AttachmentType *string `type:"string" required:"true" enum:"VerifiedAccessEndpointAttachmentType"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access endpoint. @@ -81212,8 +81683,8 @@ type CreateVerifiedAccessGroupInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access group. @@ -81348,8 +81819,8 @@ type CreateVerifiedAccessInstanceInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access instance. 
@@ -81495,8 +81966,8 @@ type CreateVerifiedAccessTrustProviderInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access trust provider. @@ -81775,7 +82246,7 @@ type CreateVolumeInput struct { AvailabilityZone *string `type:"string" required:"true"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensure Idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -81808,7 +82279,7 @@ type CreateVolumeInput struct { // * io2: 100 - 256,000 IOPS // // For io2 volumes, you can achieve up to 256,000 IOPS on instances built on - // the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // the Nitro System (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html). // On other instances, you can achieve performance up to 32,000 IOPS. // // This parameter is required for io1 and io2 volumes. The default for gp3 volumes @@ -81816,9 +82287,9 @@ type CreateVolumeInput struct { // volumes. Iops *int64 `type:"integer"` - // The identifier of the Key Management Service (KMS) KMS key to use for Amazon - // EBS encryption. If this parameter is not specified, your KMS key for Amazon - // EBS is used. If KmsKeyId is specified, the encrypted state must be true. + // The identifier of the KMS key to use for Amazon EBS encryption. If this parameter + // is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + // the encrypted state must be true. // // You can specify the KMS key using any of the following: // @@ -81837,13 +82308,18 @@ type CreateVolumeInput struct { // Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, // you can attach the volume to up to 16 Instances built on the Nitro System - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) + // (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html) // in the same Availability Zone. This parameter is supported with io1 and io2 // volumes only. For more information, see Amazon EBS Multi-Attach (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volumes-multi.html) // in the Amazon EBS User Guide. MultiAttachEnabled *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the Outpost. + // The Amazon Resource Name (ARN) of the Outpost on which to create the volume. + // + // If you intend to use a volume with an instance running on an outpost, then + // you must create the volume on the same outpost as the instance. You can't + // use a volume created in an Amazon Web Services Region with an instance on + // an Amazon Web Services outpost, or the other way around. 
OutpostArn *string `type:"string"` // The size of the volume, in GiBs. You must specify either a snapshot ID or @@ -82096,7 +82572,7 @@ type CreateVpcEndpointConnectionNotificationInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // The endpoint events for which to receive notifications. Valid values are @@ -82238,7 +82714,7 @@ type CreateVpcEndpointInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // The DNS options for the endpoint. @@ -82476,7 +82952,7 @@ type CreateVpcEndpointServiceConfigurationInput struct { AcceptanceRequired *bool `type:"boolean"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -83372,10 +83848,18 @@ func (s *CreditSpecificationRequest) SetCpuCredits(v string) *CreditSpecificatio type CustomerGateway struct { _ struct{} `type:"structure"` - // The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number - // (ASN). + // The customer gateway device's Border Gateway Protocol (BGP) Autonomous System + // Number (ASN). + // + // Valid values: 1 to 2,147,483,647 BgpAsn *string `locationName:"bgpAsn" type:"string"` + // The customer gateway device's Border Gateway Protocol (BGP) Autonomous System + // Number (ASN). + // + // Valid values: 2,147,483,648 to 4,294,967,295 + BgpAsnExtended *string `locationName:"bgpAsnExtended" type:"string"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. CertificateArn *string `locationName:"certificateArn" type:"string"` @@ -83385,7 +83869,10 @@ type CustomerGateway struct { // The name of customer gateway device. DeviceName *string `locationName:"deviceName" type:"string"` - // The IP address of the customer gateway device's outside interface. + // IPv4 address for the customer gateway device's outside interface. The address + // must be static. If OutsideIpAddressType in your VPN connection options is + // set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. + // If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address. 
IpAddress *string `locationName:"ipAddress" type:"string"` // The current state of the customer gateway (pending | available | deleting @@ -83423,6 +83910,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { return s } +// SetBgpAsnExtended sets the BgpAsnExtended field's value. +func (s *CustomerGateway) SetBgpAsnExtended(v string) *CustomerGateway { + s.BgpAsnExtended = &v + return s +} + // SetCertificateArn sets the CertificateArn field's value. func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { s.CertificateArn = &v @@ -85073,6 +85566,95 @@ func (s DeleteInternetGatewayOutput) GoString() string { return s.String() } +type DeleteIpamExternalResourceVerificationTokenInput struct { + _ struct{} `type:"structure"` + + // A check for whether you have the required permissions for the action without + // actually making the request and provides an error response. If you have the + // required permissions, the error response is DryRunOperation. Otherwise, it + // is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The token ID. + // + // IpamExternalResourceVerificationTokenId is a required field + IpamExternalResourceVerificationTokenId *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteIpamExternalResourceVerificationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteIpamExternalResourceVerificationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIpamExternalResourceVerificationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIpamExternalResourceVerificationTokenInput"} + if s.IpamExternalResourceVerificationTokenId == nil { + invalidParams.Add(request.NewErrParamRequired("IpamExternalResourceVerificationTokenId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteIpamExternalResourceVerificationTokenInput) SetDryRun(v bool) *DeleteIpamExternalResourceVerificationTokenInput { + s.DryRun = &v + return s +} + +// SetIpamExternalResourceVerificationTokenId sets the IpamExternalResourceVerificationTokenId field's value. +func (s *DeleteIpamExternalResourceVerificationTokenInput) SetIpamExternalResourceVerificationTokenId(v string) *DeleteIpamExternalResourceVerificationTokenInput { + s.IpamExternalResourceVerificationTokenId = &v + return s +} + +type DeleteIpamExternalResourceVerificationTokenOutput struct { + _ struct{} `type:"structure"` + + // The verification token. + IpamExternalResourceVerificationToken *IpamExternalResourceVerificationToken `locationName:"ipamExternalResourceVerificationToken" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteIpamExternalResourceVerificationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteIpamExternalResourceVerificationTokenOutput) GoString() string { + return s.String() +} + +// SetIpamExternalResourceVerificationToken sets the IpamExternalResourceVerificationToken field's value. +func (s *DeleteIpamExternalResourceVerificationTokenOutput) SetIpamExternalResourceVerificationToken(v *IpamExternalResourceVerificationToken) *DeleteIpamExternalResourceVerificationTokenOutput { + s.IpamExternalResourceVerificationToken = v + return s +} + type DeleteIpamInput struct { _ struct{} `type:"structure"` @@ -87265,6 +87847,13 @@ type DeletePublicIpv4PoolInput struct { // is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The Availability Zone (AZ) or Local Zone (LZ) network border group that the + // resource that the IP address is assigned to is in. Defaults to an AZ network + // border group. For more information on available Local Zones, see Local Zone + // availability (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) + // in the Amazon EC2 User Guide. + NetworkBorderGroup *string `type:"string"` + // The ID of the public IPv4 pool you want to delete. // // PoolId is a required field @@ -87308,6 +87897,12 @@ func (s *DeletePublicIpv4PoolInput) SetDryRun(v bool) *DeletePublicIpv4PoolInput return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *DeletePublicIpv4PoolInput) SetNetworkBorderGroup(v string) *DeletePublicIpv4PoolInput { + s.NetworkBorderGroup = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *DeletePublicIpv4PoolInput) SetPoolId(v string) *DeletePublicIpv4PoolInput { s.PoolId = &v @@ -89528,8 +90123,8 @@ type DeleteVerifiedAccessEndpointInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -89628,8 +90223,8 @@ type DeleteVerifiedAccessGroupInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -89728,8 +90323,8 @@ type DeleteVerifiedAccessInstanceInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. 
For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -89828,8 +90423,8 @@ type DeleteVerifiedAccessTrustProviderInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -94252,9 +94847,7 @@ func (s *DescribeCustomerGatewaysOutput) SetCustomerGateways(v []*CustomerGatewa type DescribeDhcpOptionsInput struct { _ struct{} `type:"structure"` - // The IDs of one or more DHCP options sets. - // - // Default: Describes all your DHCP options sets. + // The IDs of DHCP option sets. DhcpOptionsIds []*string `locationName:"DhcpOptionsId" locationNameList:"DhcpOptionsId" type:"list"` // Checks whether you have the required permissions for the action, without @@ -94359,7 +94952,7 @@ func (s *DescribeDhcpOptionsInput) SetNextToken(v string) *DescribeDhcpOptionsIn type DescribeDhcpOptionsOutput struct { _ struct{} `type:"structure"` - // Information about one or more DHCP options sets. + // Information about the DHCP options sets. DhcpOptions []*DhcpOptions `locationName:"dhcpOptionsSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -99592,7 +100185,10 @@ type DescribeInstancesInput struct { // * private-dns-name-options.hostname-type - The type of hostname (ip-name // | resource-name). // - // * private-ip-address - The private IPv4 address of the instance. + // * private-ip-address - The private IPv4 address of the instance. This + // can only be used to filter by the primary IP address of the network interface + // attached to the instance. To filter by additional IP addresses assigned + // to the network interface, use the filter network-interface.addresses.private-ip-address. // // * product-code - The product code associated with the AMI used to launch // the instance. @@ -99884,7 +100480,7 @@ func (s *DescribeInternetGatewaysInput) SetNextToken(v string) *DescribeInternet type DescribeInternetGatewaysOutput struct { _ struct{} `type:"structure"` - // Information about one or more internet gateways. + // Information about the internet gateways. InternetGateways []*InternetGateway `locationName:"internetGatewaySet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -100029,6 +100625,151 @@ func (s *DescribeIpamByoasnOutput) SetNextToken(v string) *DescribeIpamByoasnOut return s } +type DescribeIpamExternalResourceVerificationTokensInput struct { + _ struct{} `type:"structure"` + + // A check for whether you have the required permissions for the action without + // actually making the request and provides an error response. 
If you have the + // required permissions, the error response is DryRunOperation. Otherwise, it + // is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters for the request. For more information about filtering, + // see Filtering CLI output (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-filter.html). + // + // Available filters: + // + // * ipam-arn + // + // * ipam-external-resource-verification-token-arn + // + // * ipam-external-resource-verification-token-id + // + // * ipam-id + // + // * ipam-region + // + // * state + // + // * status + // + // * token-name + // + // * token-value + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // Verification token IDs. + IpamExternalResourceVerificationTokenIds []*string `locationName:"IpamExternalResourceVerificationTokenId" locationNameList:"item" type:"list"` + + // The maximum number of tokens to return in one page of results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeIpamExternalResourceVerificationTokensInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeIpamExternalResourceVerificationTokensInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIpamExternalResourceVerificationTokensInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIpamExternalResourceVerificationTokensInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeIpamExternalResourceVerificationTokensInput) SetDryRun(v bool) *DescribeIpamExternalResourceVerificationTokensInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeIpamExternalResourceVerificationTokensInput) SetFilters(v []*Filter) *DescribeIpamExternalResourceVerificationTokensInput { + s.Filters = v + return s +} + +// SetIpamExternalResourceVerificationTokenIds sets the IpamExternalResourceVerificationTokenIds field's value. +func (s *DescribeIpamExternalResourceVerificationTokensInput) SetIpamExternalResourceVerificationTokenIds(v []*string) *DescribeIpamExternalResourceVerificationTokensInput { + s.IpamExternalResourceVerificationTokenIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeIpamExternalResourceVerificationTokensInput) SetMaxResults(v int64) *DescribeIpamExternalResourceVerificationTokensInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeIpamExternalResourceVerificationTokensInput) SetNextToken(v string) *DescribeIpamExternalResourceVerificationTokensInput { + s.NextToken = &v + return s +} + +type DescribeIpamExternalResourceVerificationTokensOutput struct { + _ struct{} `type:"structure"` + + // Verification tokens. + IpamExternalResourceVerificationTokens []*IpamExternalResourceVerificationToken `locationName:"ipamExternalResourceVerificationTokenSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeIpamExternalResourceVerificationTokensOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeIpamExternalResourceVerificationTokensOutput) GoString() string { + return s.String() +} + +// SetIpamExternalResourceVerificationTokens sets the IpamExternalResourceVerificationTokens field's value. +func (s *DescribeIpamExternalResourceVerificationTokensOutput) SetIpamExternalResourceVerificationTokens(v []*IpamExternalResourceVerificationToken) *DescribeIpamExternalResourceVerificationTokensOutput { + s.IpamExternalResourceVerificationTokens = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIpamExternalResourceVerificationTokensOutput) SetNextToken(v string) *DescribeIpamExternalResourceVerificationTokensOutput { + s.NextToken = &v + return s +} + type DescribeIpamPoolsInput struct { _ struct{} `type:"structure"` @@ -100999,7 +101740,7 @@ type DescribeLaunchTemplateVersionsInput struct { // // For more information, see Use a Systems Manager parameter instead of an AMI // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. // // Default: false ResolveAlias *bool `type:"boolean"` @@ -102862,8 +103603,6 @@ type DescribeNetworkAclsInput struct { MaxResults *int64 `min:"5" type:"integer"` // The IDs of the network ACLs. - // - // Default: Describes all your network ACLs. NetworkAclIds []*string `locationName:"NetworkAclId" locationNameList:"item" type:"list"` // The token returned from a previous paginated request. Pagination continues @@ -102935,7 +103674,7 @@ func (s *DescribeNetworkAclsInput) SetNextToken(v string) *DescribeNetworkAclsIn type DescribeNetworkAclsOutput struct { _ struct{} `type:"structure"` - // Information about one or more network ACLs. + // Information about the network ACLs. NetworkAcls []*NetworkAcl `locationName:"networkAclSet" locationNameList:"item" type:"list"` // The token to include in another request to get the next page of items. This @@ -104117,7 +104856,13 @@ type DescribePlacementGroupsInput struct { // The names of the placement groups. // - // Default: Describes all your placement groups, or only those otherwise specified. 
+ // Constraints: + // + // * You can specify a name only if the placement group is owned by your + // account. + // + // * If a placement group is shared with your account, specifying the name + // results in an error. You must use the GroupId parameter instead. GroupNames []*string `locationName:"groupName" type:"list"` } @@ -105175,7 +105920,7 @@ type DescribeReservedInstancesOfferingsInput struct { InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` // The instance type that the reservation will cover (for example, m1.small). - // For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // For more information, see Amazon EC2 instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon EC2 User Guide. InstanceType *string `type:"string" enum:"InstanceType"` @@ -105490,8 +106235,6 @@ type DescribeRouteTablesInput struct { NextToken *string `type:"string"` // The IDs of the route tables. - // - // Default: Describes all your route tables. RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` } @@ -105564,7 +106307,7 @@ type DescribeRouteTablesOutput struct { // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` - // Information about one or more route tables. + // Information about the route tables. RouteTables []*RouteTable `locationName:"routeTableSet" locationNameList:"item" type:"list"` } @@ -106640,11 +107383,9 @@ type DescribeSnapshotsInput struct { // * volume-size - The size of the volume, in GiB. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The maximum number of snapshots to return for this request. This value can - // be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results - // are returned. If this parameter is not used, then the request returns all - // snapshots. You cannot specify this parameter and the snapshot IDs parameter - // in the same request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). + // The maximum number of items to return for this request. To get the next page + // of items, make another request with the token returned in the output. For + // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). MaxResults *int64 `type:"integer"` // The token returned from a previous paginated request. Pagination continues @@ -106728,8 +107469,8 @@ func (s *DescribeSnapshotsInput) SetSnapshotIds(v []*string) *DescribeSnapshotsI type DescribeSnapshotsOutput struct { _ struct{} `type:"structure"` - // The token to include in another request to return the next page of snapshots. - // This value is null when there are no more snapshots to return. + // The token to include in another request to get the next page of items. This + // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` // Information about the snapshots. @@ -107346,7 +108087,7 @@ type DescribeSpotInstanceRequestsInput struct { // | cancelled | failed). Spot request status information can help you track // your Amazon EC2 Spot Instance requests. 
For more information, see Spot // request status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-request-status.html) - // in the Amazon EC2 User Guide for Linux Instances. + // in the Amazon EC2 User Guide. // // * status-code - The short code describing the most recent evaluation of // your Spot Instance request. @@ -108089,7 +108830,7 @@ type DescribeSubnetsOutput struct { // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` - // Information about one or more subnets. + // Information about the subnets. Subnets []*Subnet `locationName:"subnetSet" locationNameList:"item" type:"list"` } @@ -108241,6 +108982,164 @@ func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput { return s } +type DescribeTrafficMirrorFilterRulesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Traffic mirror filters. + // + // * traffic-mirror-filter-rule-id: The ID of the Traffic Mirror rule. + // + // * traffic-mirror-filter-id: The ID of the filter that this rule is associated + // with. + // + // * rule-number: The number of the Traffic Mirror rule. + // + // * rule-action: The action taken on the filtered traffic. Possible actions + // are accept and reject. + // + // * traffic-direction: The traffic direction. Possible directions are ingress + // and egress. + // + // * protocol: The protocol, for example UDP, assigned to the Traffic Mirror + // rule. + // + // * source-cidr-block: The source CIDR block assigned to the Traffic Mirror + // rule. + // + // * destination-cidr-block: The destination CIDR block assigned to the Traffic + // Mirror rule. + // + // * description: The description of the Traffic Mirror rule. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // Traffic filter ID. + TrafficMirrorFilterId *string `type:"string"` + + // Traffic filter rule IDs. + TrafficMirrorFilterRuleIds []*string `locationName:"TrafficMirrorFilterRuleId" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTrafficMirrorFilterRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTrafficMirrorFilterRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTrafficMirrorFilterRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTrafficMirrorFilterRulesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetDryRun(v bool) *DescribeTrafficMirrorFilterRulesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetFilters(v []*Filter) *DescribeTrafficMirrorFilterRulesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetMaxResults(v int64) *DescribeTrafficMirrorFilterRulesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetNextToken(v string) *DescribeTrafficMirrorFilterRulesInput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilterId sets the TrafficMirrorFilterId field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetTrafficMirrorFilterId(v string) *DescribeTrafficMirrorFilterRulesInput { + s.TrafficMirrorFilterId = &v + return s +} + +// SetTrafficMirrorFilterRuleIds sets the TrafficMirrorFilterRuleIds field's value. +func (s *DescribeTrafficMirrorFilterRulesInput) SetTrafficMirrorFilterRuleIds(v []*string) *DescribeTrafficMirrorFilterRulesInput { + s.TrafficMirrorFilterRuleIds = v + return s +} + +type DescribeTrafficMirrorFilterRulesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Traffic mirror rules. + TrafficMirrorFilterRules []*TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRuleSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTrafficMirrorFilterRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeTrafficMirrorFilterRulesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTrafficMirrorFilterRulesOutput) SetNextToken(v string) *DescribeTrafficMirrorFilterRulesOutput { + s.NextToken = &v + return s +} + +// SetTrafficMirrorFilterRules sets the TrafficMirrorFilterRules field's value. 
+func (s *DescribeTrafficMirrorFilterRulesOutput) SetTrafficMirrorFilterRules(v []*TrafficMirrorFilterRule) *DescribeTrafficMirrorFilterRulesOutput { + s.TrafficMirrorFilterRules = v + return s +} + type DescribeTrafficMirrorFiltersInput struct { _ struct{} `type:"structure"` @@ -110984,11 +111883,8 @@ type DescribeVolumeStatusInput struct { Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. To get the next page - // of items, make another request with the token returned in the output. This - // value can be between 5 and 1,000; if the value is larger than 1,000, only - // 1,000 results are returned. If this parameter is not used, then all items - // are returned. You cannot specify this parameter and the volume IDs parameter - // in the same request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). + // of items, make another request with the token returned in the output. For + // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). MaxResults *int64 `type:"integer"` // The token returned from a previous paginated request. Pagination continues @@ -111149,18 +112045,16 @@ type DescribeVolumesInput struct { // | sc1| standard) Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The maximum number of volumes to return for this request. This value can - // be between 5 and 500; if you specify a value larger than 500, only 500 items - // are returned. If this parameter is not used, then all items are returned. - // You cannot specify this parameter and the volume IDs parameter in the same - // request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). + // The maximum number of items to return for this request. To get the next page + // of items, make another request with the token returned in the output. For + // more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). MaxResults *int64 `locationName:"maxResults" type:"integer"` // The token returned from a previous paginated request. Pagination continues - // from the end of the items returned from the previous request. + // from the end of the items returned by the previous request. NextToken *string `locationName:"nextToken" type:"string"` - // The volume IDs. + // The volume IDs. If not specified, then all volumes are included in the response. VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` } @@ -111255,7 +112149,7 @@ type DescribeVolumesModificationsInput struct { // paginated request. For more information, see Pagination (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination). MaxResults *int64 `type:"integer"` - // The token returned by a previous paginated request. Pagination continues + // The token returned from a previous paginated request. Pagination continues // from the end of the items returned by the previous request. NextToken *string `type:"string"` @@ -111315,7 +112209,7 @@ type DescribeVolumesModificationsOutput struct { _ struct{} `type:"structure"` // The token to include in another request to get the next page of items. This - // value is null if there are no more items to return. 
+ // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` // Information about the volume modifications. @@ -112494,7 +113388,7 @@ type DescribeVpcEndpointsOutput struct { // items to return, the string is empty. NextToken *string `locationName:"nextToken" type:"string"` - // Information about the endpoints. + // Information about the VPC endpoints. VpcEndpoints []*VpcEndpoint `locationName:"vpcEndpointSet" locationNameList:"item" type:"list"` } @@ -112760,8 +113654,6 @@ type DescribeVpcsInput struct { NextToken *string `type:"string"` // The IDs of the VPCs. - // - // Default: Describes all your VPCs. VpcIds []*string `locationName:"VpcId" locationNameList:"VpcId" type:"list"` } @@ -112833,7 +113725,7 @@ type DescribeVpcsOutput struct { // value is null when there are no more items to return. NextToken *string `locationName:"nextToken" type:"string"` - // Information about one or more VPCs. + // Information about the VPCs. Vpcs []*Vpc `locationName:"vpcSet" locationNameList:"item" type:"list"` } @@ -113517,8 +114409,8 @@ type DetachVerifiedAccessTrustProviderInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -117026,7 +117918,7 @@ type DisassociateTrunkInterfaceInput struct { AssociationId *string `type:"string" required:"true"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -117089,7 +117981,7 @@ type DisassociateTrunkInterfaceOutput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` // Returns true if the request succeeds; otherwise, it returns an error. @@ -118418,7 +119310,7 @@ func (s *EgressOnlyInternetGateway) SetTags(v []*Tag) *EgressOnlyInternetGateway // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads // that require graphics acceleration, we recommend that you use Amazon EC2 -// G4ad, G4dn, or G5 instances. +// G4, G5, or G6 instances. // // Describes the association between an instance and an Elastic Graphics accelerator. 
type ElasticGpuAssociation struct { @@ -118482,7 +119374,7 @@ func (s *ElasticGpuAssociation) SetElasticGpuId(v string) *ElasticGpuAssociation // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads // that require graphics acceleration, we recommend that you use Amazon EC2 -// G4ad, G4dn, or G5 instances. +// G4, G5, or G6 instances. // // Describes the status of an Elastic Graphics accelerator. type ElasticGpuHealth struct { @@ -118518,16 +119410,13 @@ func (s *ElasticGpuHealth) SetStatus(v string) *ElasticGpuHealth { // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads // that require graphics acceleration, we recommend that you use Amazon EC2 -// G4ad, G4dn, or G5 instances. +// G4, G5, or G6 instances. // // A specification for an Elastic Graphics accelerator. type ElasticGpuSpecification struct { _ struct{} `type:"structure"` - // The type of Elastic Graphics accelerator. For more information about the - // values to specify for Type, see Elastic Graphics Basics (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html#elastic-graphics-basics), - // specifically the Elastic Graphics accelerator column, in the Amazon Elastic - // Compute Cloud User Guide for Windows Instances. + // The type of Elastic Graphics accelerator. // // Type is a required field Type *string `type:"string" required:"true"` @@ -118612,7 +119501,7 @@ func (s *ElasticGpuSpecificationResponse) SetType(v string) *ElasticGpuSpecifica // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads // that require graphics acceleration, we recommend that you use Amazon EC2 -// G4ad, G4dn, or G5 instances. +// G4, G5, or G6 instances. // // Describes an Elastic Graphics accelerator. type ElasticGpus struct { @@ -123811,9 +124700,38 @@ type FleetLaunchTemplateOverrides struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The ID of the AMI. An AMI is required to launch an instance. This parameter - // is only available for fleets of type instant. For fleets of type maintain - // and request, you must specify the AMI ID in the launch template. + // The ID of the AMI in the format ami-17characters00000. + // + // Alternatively, you can specify a Systems Manager parameter, using one of + // the following formats. The Systems Manager parameter will resolve to an AMI + // ID on launch. + // + // To reference a public parameter: + // + // * resolve:ssm:public-parameter + // + // To reference a parameter stored in the same account: + // + // * resolve:ssm:parameter-name + // + // * resolve:ssm:parameter-name:version-number + // + // * resolve:ssm:parameter-name:label + // + // To reference a parameter shared from another Amazon Web Services account: + // + // * resolve:ssm:parameter-ARN + // + // * resolve:ssm:parameter-ARN:version-number + // + // * resolve:ssm:parameter-ARN:label + // + // For more information, see Use a Systems Manager parameter instead of an AMI + // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) + // in the Amazon EC2 User Guide. + // + // This parameter is only available for fleets of type instant. For fleets of + // type maintain and request, you must specify the AMI ID in the launch template. ImageId *string `locationName:"imageId" type:"string"` // The attributes for the instance types. 
When you specify instance attributes, @@ -123861,7 +124779,13 @@ type FleetLaunchTemplateOverrides struct { // The ID of the subnet in which to launch the instances. SubnetId *string `locationName:"subnetId" type:"string"` - // The number of units provided by the specified instance type. + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms of instances, + // or a performance characteristic such as vCPUs, memory, or I/O. + // + // If the target capacity divided by this value is not a whole number, Amazon + // EC2 rounds the number of instances to the next whole number. If this value + // is not specified, the default is 1. // // When specifying weights, the price used in the lowest-price and price-capacity-optimized // allocation strategies is per unit hour (where the instance price is divided @@ -123950,9 +124874,38 @@ type FleetLaunchTemplateOverridesRequest struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string `type:"string"` - // The ID of the AMI. An AMI is required to launch an instance. This parameter - // is only available for fleets of type instant. For fleets of type maintain - // and request, you must specify the AMI ID in the launch template. + // The ID of the AMI in the format ami-17characters00000. + // + // Alternatively, you can specify a Systems Manager parameter, using one of + // the following formats. The Systems Manager parameter will resolve to an AMI + // ID on launch. + // + // To reference a public parameter: + // + // * resolve:ssm:public-parameter + // + // To reference a parameter stored in the same account: + // + // * resolve:ssm:parameter-name + // + // * resolve:ssm:parameter-name:version-number + // + // * resolve:ssm:parameter-name:label + // + // To reference a parameter shared from another Amazon Web Services account: + // + // * resolve:ssm:parameter-ARN + // + // * resolve:ssm:parameter-ARN:version-number + // + // * resolve:ssm:parameter-ARN:label + // + // For more information, see Use a Systems Manager parameter instead of an AMI + // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) + // in the Amazon EC2 User Guide. + // + // This parameter is only available for fleets of type instant. For fleets of + // type maintain and request, you must specify the AMI ID in the launch template. ImageId *string `type:"string"` // The attributes for the instance types. When you specify instance attributes, @@ -124002,7 +124955,13 @@ type FleetLaunchTemplateOverridesRequest struct { // A request of type instant can have only one subnet ID. SubnetId *string `type:"string"` - // The number of units provided by the specified instance type. + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms of instances, + // or a performance characteristic such as vCPUs, memory, or I/O. + // + // If the target capacity divided by this value is not a whole number, Amazon + // EC2 rounds the number of instances to the next whole number. If this value + // is not specified, the default is 1. 
// // When specifying weights, the price used in the lowest-price and price-capacity-optimized // allocation strategies is per unit hour (where the instance price is divided @@ -124503,7 +125462,7 @@ type FlowLog struct { // The maximum interval of time, in seconds, during which a flow of packets // is captured and aggregated into a flow log record. // - // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), + // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html), // the aggregation interval is always 60 seconds (1 minute) or less, regardless // of the specified value. // @@ -131356,7 +132315,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). -// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` @@ -131392,7 +132351,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). -// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -136830,7 +137789,7 @@ type InstanceNetworkInterface struct { // A security group connection tracking configuration that enables you to set // the timeout for connection tracking on an Elastic network interface. For // more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. ConnectionTrackingConfiguration *ConnectionTrackingSpecificationResponse `locationName:"connectionTrackingConfiguration" type:"structure"` // The description. @@ -137198,7 +138157,7 @@ type InstanceNetworkInterfaceSpecification struct { // A security group connection tracking specification that enables you to set // the timeout for connection tracking on an Elastic network interface. For // more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. ConnectionTrackingSpecification *ConnectionTrackingSpecificationRequest `type:"structure"` // If set to true, the interface is deleted when the instance is terminated. 
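The WeightedCapacity documentation added in the FleetLaunchTemplateOverrides and FleetLaunchTemplateOverridesRequest hunks above describes how the fleet's target capacity is divided by the per-instance weight and rounded to the next whole number, and the ImageId documentation describes the resolve:ssm: parameter formats. The following is a minimal, illustrative Go sketch of both, not part of the vendored file: it assumes the aws-sdk-go ec2 and aws packages vendored by this patch, and the SSM parameter name, instance type, and capacity figures are placeholders rather than values taken from this change.

package main

import (
	"fmt"
	"math"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Hypothetical override: the parameter name and instance type are
	// placeholders. Per the ImageId docs above, a resolve:ssm: value is
	// resolved to an AMI ID at launch (instant fleets only).
	override := ec2.FleetLaunchTemplateOverridesRequest{
		ImageId:          aws.String("resolve:ssm:my-ami-parameter"),
		InstanceType:     aws.String("m5.large"),
		WeightedCapacity: aws.Float64(3),
	}

	// Rounding rule from the WeightedCapacity docs: if the target capacity
	// divided by the weight is not a whole number, EC2 rounds the instance
	// count up to the next whole number.
	targetCapacity := 10.0
	weight := aws.Float64Value(override.WeightedCapacity)
	instances := math.Ceil(targetCapacity / weight)
	fmt.Printf("%v units at weight %v -> %v instances\n", targetCapacity, weight, instances)
}

With these placeholder numbers, 10 units at a weight of 3 is roughly 3.33, so EC2 would launch 4 instances.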
@@ -137750,7 +138709,7 @@ type InstanceRequirements struct { // // The parameter accepts an integer, which Amazon EC2 interprets as a percentage. // - // If you set DesiredCapacityType to vcpu or memory-mib, the price protection + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection // threshold is based on the per vCPU or per memory price instead of the per // instance price. // @@ -138242,7 +139201,7 @@ type InstanceRequirementsRequest struct { // // The parameter accepts an integer, which Amazon EC2 interprets as a percentage. // - // If you set DesiredCapacityType to vcpu or memory-mib, the price protection + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection // threshold is based on the per vCPU or per memory price instead of the per // instance price. // @@ -140421,8 +141380,11 @@ type IpamDiscoveredPublicAddress struct { // The resource discovery ID. IpamResourceDiscoveryId *string `locationName:"ipamResourceDiscoveryId" type:"string"` - // The network border group that the resource that the IP address is assigned - // to is in. + // The Availability Zone (AZ) or Local Zone (LZ) network border group that the + // resource that the IP address is assigned to is in. Defaults to an AZ network + // border group. For more information on available Local Zones, see Local Zone + // availability (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) + // in the Amazon EC2 User Guide. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // The description of the network interface that IP address is assigned to. @@ -140598,6 +141560,9 @@ func (s *IpamDiscoveredPublicAddress) SetVpcId(v string) *IpamDiscoveredPublicAd type IpamDiscoveredResourceCidr struct { _ struct{} `type:"structure"` + // The Availability Zone ID. + AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + // The percentage of IP address space in use. To convert the decimal to a percentage, // multiply the decimal by 100. Note the following: // @@ -140618,6 +141583,10 @@ type IpamDiscoveredResourceCidr struct { // The resource discovery ID. IpamResourceDiscoveryId *string `locationName:"ipamResourceDiscoveryId" type:"string"` + // For elastic network interfaces, this is the status of whether or not the + // elastic network interface is attached. + NetworkInterfaceAttachmentStatus *string `locationName:"networkInterfaceAttachmentStatus" type:"string" enum:"IpamNetworkInterfaceAttachmentStatus"` + // The resource CIDR. ResourceCidr *string `locationName:"resourceCidr" type:"string"` @@ -140661,6 +141630,12 @@ func (s IpamDiscoveredResourceCidr) GoString() string { return s.String() } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *IpamDiscoveredResourceCidr) SetAvailabilityZoneId(v string) *IpamDiscoveredResourceCidr { + s.AvailabilityZoneId = &v + return s +} + // SetIpUsage sets the IpUsage field's value. func (s *IpamDiscoveredResourceCidr) SetIpUsage(v float64) *IpamDiscoveredResourceCidr { s.IpUsage = &v @@ -140673,6 +141648,12 @@ func (s *IpamDiscoveredResourceCidr) SetIpamResourceDiscoveryId(v string) *IpamD return s } +// SetNetworkInterfaceAttachmentStatus sets the NetworkInterfaceAttachmentStatus field's value. +func (s *IpamDiscoveredResourceCidr) SetNetworkInterfaceAttachmentStatus(v string) *IpamDiscoveredResourceCidr { + s.NetworkInterfaceAttachmentStatus = &v + return s +} + // SetResourceCidr sets the ResourceCidr field's value. 
func (s *IpamDiscoveredResourceCidr) SetResourceCidr(v string) *IpamDiscoveredResourceCidr { s.ResourceCidr = &v @@ -140776,6 +141757,131 @@ func (s *IpamDiscoveryFailureReason) SetMessage(v string) *IpamDiscoveryFailureR return s } +// A verification token is an Amazon Web Services-generated random value that +// you can use to prove ownership of an external resource. For example, you +// can use a verification token to validate that you control a public IP address +// range when you bring an IP address range to Amazon Web Services (BYOIP). +type IpamExternalResourceVerificationToken struct { + _ struct{} `type:"structure"` + + // ARN of the IPAM that created the token. + IpamArn *string `locationName:"ipamArn" min:"1" type:"string"` + + // Token ARN. + IpamExternalResourceVerificationTokenArn *string `locationName:"ipamExternalResourceVerificationTokenArn" min:"1" type:"string"` + + // The ID of the token. + IpamExternalResourceVerificationTokenId *string `locationName:"ipamExternalResourceVerificationTokenId" type:"string"` + + // The ID of the IPAM that created the token. + IpamId *string `locationName:"ipamId" type:"string"` + + // Region of the IPAM that created the token. + IpamRegion *string `locationName:"ipamRegion" type:"string"` + + // Token expiration. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` + + // Token state. + State *string `locationName:"state" type:"string" enum:"IpamExternalResourceVerificationTokenState"` + + // Token status. + Status *string `locationName:"status" type:"string" enum:"TokenState"` + + // Token tags. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Token name. + TokenName *string `locationName:"tokenName" type:"string"` + + // Token value. + TokenValue *string `locationName:"tokenValue" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IpamExternalResourceVerificationToken) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IpamExternalResourceVerificationToken) GoString() string { + return s.String() +} + +// SetIpamArn sets the IpamArn field's value. +func (s *IpamExternalResourceVerificationToken) SetIpamArn(v string) *IpamExternalResourceVerificationToken { + s.IpamArn = &v + return s +} + +// SetIpamExternalResourceVerificationTokenArn sets the IpamExternalResourceVerificationTokenArn field's value. +func (s *IpamExternalResourceVerificationToken) SetIpamExternalResourceVerificationTokenArn(v string) *IpamExternalResourceVerificationToken { + s.IpamExternalResourceVerificationTokenArn = &v + return s +} + +// SetIpamExternalResourceVerificationTokenId sets the IpamExternalResourceVerificationTokenId field's value. +func (s *IpamExternalResourceVerificationToken) SetIpamExternalResourceVerificationTokenId(v string) *IpamExternalResourceVerificationToken { + s.IpamExternalResourceVerificationTokenId = &v + return s +} + +// SetIpamId sets the IpamId field's value. 
+func (s *IpamExternalResourceVerificationToken) SetIpamId(v string) *IpamExternalResourceVerificationToken { + s.IpamId = &v + return s +} + +// SetIpamRegion sets the IpamRegion field's value. +func (s *IpamExternalResourceVerificationToken) SetIpamRegion(v string) *IpamExternalResourceVerificationToken { + s.IpamRegion = &v + return s +} + +// SetNotAfter sets the NotAfter field's value. +func (s *IpamExternalResourceVerificationToken) SetNotAfter(v time.Time) *IpamExternalResourceVerificationToken { + s.NotAfter = &v + return s +} + +// SetState sets the State field's value. +func (s *IpamExternalResourceVerificationToken) SetState(v string) *IpamExternalResourceVerificationToken { + s.State = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IpamExternalResourceVerificationToken) SetStatus(v string) *IpamExternalResourceVerificationToken { + s.Status = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IpamExternalResourceVerificationToken) SetTags(v []*Tag) *IpamExternalResourceVerificationToken { + s.Tags = v + return s +} + +// SetTokenName sets the TokenName field's value. +func (s *IpamExternalResourceVerificationToken) SetTokenName(v string) *IpamExternalResourceVerificationToken { + s.TokenName = &v + return s +} + +// SetTokenValue sets the TokenValue field's value. +func (s *IpamExternalResourceVerificationToken) SetTokenValue(v string) *IpamExternalResourceVerificationToken { + s.TokenValue = &v + return s +} + // The operating Regions for an IPAM. Operating Regions are Amazon Web Services // Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only discovers // and monitors resources in the Amazon Web Services Regions you select as operating @@ -140892,14 +141998,20 @@ type IpamPool struct { // overlap or conflict. IpamScopeType *string `locationName:"ipamScopeType" type:"string" enum:"IpamScopeType"` - // The locale of the IPAM pool. In IPAM, the locale is the Amazon Web Services - // Region where you want to make an IPAM pool available for allocations. Only - // resources in the same Region as the locale of the pool can get IP address - // allocations from the pool. You can only allocate a CIDR for a VPC, for example, - // from an IPAM pool that shares a locale with the VPC’s Region. Note that - // once you choose a Locale for a pool, you cannot modify it. If you choose - // an Amazon Web Services Region for locale that has not been configured as - // an operating Region for the IPAM, you'll get an error. + // The locale of the IPAM pool. + // + // The locale for the pool should be one of the following: + // + // * An Amazon Web Services Region where you want this IPAM pool to be available + // for allocations. + // + // * The network border group for an Amazon Web Services Local Zone where + // you want this IPAM pool to be available for allocations (supported Local + // Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail)). + // This option is only available for IPAM IPv4 pools in the public scope. + // + // If you choose an Amazon Web Services Region for locale that has not been + // configured as an operating Region for the IPAM, you'll get an error. Locale *string `locationName:"locale" type:"string"` // The Amazon Web Services account ID of the owner of the IPAM pool. @@ -141546,6 +142658,9 @@ func (s *IpamPublicAddressTags) SetEipTags(v []*IpamPublicAddressTag) *IpamPubli type IpamResourceCidr struct { _ struct{} `type:"structure"` + // The Availability Zone ID. 
+ AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + // The compliance status of the IPAM resource. For more information on compliance // statuses, see Monitor CIDR usage by resource (https://docs.aws.amazon.com/vpc/latest/ipam/monitor-cidr-compliance-ipam.html) // in the Amazon VPC IPAM User Guide. @@ -141631,6 +142746,12 @@ func (s IpamResourceCidr) GoString() string { return s.String() } +// SetAvailabilityZoneId sets the AvailabilityZoneId field's value. +func (s *IpamResourceCidr) SetAvailabilityZoneId(v string) *IpamResourceCidr { + s.AvailabilityZoneId = &v + return s +} + // SetComplianceStatus sets the ComplianceStatus field's value. func (s *IpamResourceCidr) SetComplianceStatus(v string) *IpamResourceCidr { s.ComplianceStatus = &v @@ -142198,9 +143319,9 @@ func (s *IpamScope) SetTags(v []*Tag) *IpamScope { type Ipv4PrefixSpecification struct { _ struct{} `type:"structure"` - // The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network - // interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) - // in the Amazon Elastic Compute Cloud User Guide. + // The IPv4 prefix. For information, see Assigning prefixes to network interfaces + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) + // in the Amazon EC2 User Guide. Ipv4Prefix *string `locationName:"ipv4Prefix" type:"string"` } @@ -142232,9 +143353,9 @@ func (s *Ipv4PrefixSpecification) SetIpv4Prefix(v string) *Ipv4PrefixSpecificati type Ipv4PrefixSpecificationRequest struct { _ struct{} `type:"structure"` - // The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network - // interfaces (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) - // in the Amazon Elastic Compute Cloud User Guide. + // The IPv4 prefix. For information, see Assigning prefixes to network interfaces + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html) + // in the Amazon EC2 User Guide. Ipv4Prefix *string `type:"string"` } @@ -144295,7 +145416,7 @@ func (s *LaunchTemplateInstanceMarketOptionsRequest) SetSpotOptions(v *LaunchTem // The metadata options for the instance. For more information, see Instance // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type LaunchTemplateInstanceMetadataOptions struct { _ struct{} `type:"structure"` @@ -144406,7 +145527,7 @@ func (s *LaunchTemplateInstanceMetadataOptions) SetState(v string) *LaunchTempla // The metadata options for the instance. For more information, see Instance // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) -// in the Amazon Elastic Compute Cloud User Guide. +// in the Amazon EC2 User Guide. type LaunchTemplateInstanceMetadataOptionsRequest struct { _ struct{} `type:"structure"` @@ -144528,8 +145649,8 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // A security group connection tracking specification that enables you to set // the timeout for connection tracking on an Elastic network interface. For - // more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) - // in the Amazon Elastic Compute Cloud User Guide. 
+ // more information, see Idle connection tracking timeout (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) + // in the Amazon EC2 User Guide. ConnectionTrackingSpecification *ConnectionTrackingSpecification `locationName:"connectionTrackingSpecification" type:"structure"` // Indicates whether the network interface is deleted when the instance is terminated. @@ -144769,8 +145890,8 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // A security group connection tracking specification that enables you to set // the timeout for connection tracking on an Elastic network interface. For - // more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) - // in the Amazon Elastic Compute Cloud User Guide. + // more information, see Idle connection tracking timeout (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) + // in the Amazon EC2 User Guide. ConnectionTrackingSpecification *ConnectionTrackingSpecificationRequest `type:"structure"` // Indicates whether the network interface is deleted when the instance is terminated. @@ -144794,7 +145915,7 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // The type of network interface. To create an Elastic Fabric Adapter (EFA), // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. // // If you are not creating an EFA, specify interface or omit this parameter. // @@ -145115,9 +146236,15 @@ type LaunchTemplateOverrides struct { // The ID of the subnet in which to launch the instances. SubnetId *string `locationName:"subnetId" type:"string"` - // The number of units provided by the specified instance type. + // The number of units provided by the specified instance type. These are the + // same units that you chose to set the target capacity in terms of instances, + // or a performance characteristic such as vCPUs, memory, or I/O. // - // When specifying weights, the price used in the lowest-price and price-capacity-optimized + // If the target capacity divided by this value is not a whole number, Amazon + // EC2 rounds the number of instances to the next whole number. If this value + // is not specified, the default is 1. + // + // When specifying weights, the price used in the lowestPrice and priceCapacityOptimized // allocation strategies is per unit hour (where the instance price is divided // by the specified weight). However, if all the specified weights are above // the requested TargetCapacity, resulting in only 1 instance being launched, @@ -148251,10 +149378,9 @@ type ModifyAvailabilityZoneGroupInput struct { // GroupName is a required field GroupName *string `type:"string" required:"true"` - // Indicates whether you are opted in to the Local Zone group or Wavelength - // Zone group. The only valid value is opted-in. You must contact Amazon Web - // Services Support (https://console.aws.amazon.com/support/home#/case/create%3FissueType=customer-service%26serviceCode=general-info%26getting-started%26categoryCode=using-aws%26services) - // to opt out of a Local Zone or Wavelength Zone group. + // Indicates whether to opt in to the zone group. The only valid value is opted-in. 
+ // You must contact Amazon Web Services Support to opt out of a Local Zone or + // Wavelength Zone group. // // OptInStatus is a required field OptInStatus *string `type:"string" required:"true" enum:"ModifyAvailabilityZoneOptInStatus"` @@ -148967,9 +150093,9 @@ type ModifyEbsDefaultKmsKeyIdInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The identifier of the Key Management Service (KMS) KMS key to use for Amazon - // EBS encryption. If this parameter is not specified, your KMS key for Amazon - // EBS is used. If KmsKeyId is specified, the encrypted state must be true. + // The identifier of the KMS key to use for Amazon EBS encryption. If this parameter + // is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, + // the encrypted state must be true. // // You can specify the KMS key using any of the following: // @@ -149941,7 +151067,7 @@ type ModifyInstanceAttributeInput struct { BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether an instance is enabled for stop protection. For more information, - // see Stop Protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection). + // see Enable stop protection for your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html). DisableApiStop *AttributeBooleanValue `type:"structure"` // If the value is true, you can't terminate the instance using the Amazon EC2 @@ -150016,10 +151142,10 @@ type ModifyInstanceAttributeInput struct { // a PV instance can make it unreachable. SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` - // Changes the instance's user data to the specified value. If you are using - // an Amazon Web Services SDK or command line tool, base64-encoding is performed - // for you, and you can load the text from a file. Otherwise, you must provide - // base64-encoded text. + // Changes the instance's user data to the specified value. User data must be + // base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding + // might be performed for you. For more information, see Work with instance + // user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html). UserData *BlobAttributeValue `locationName:"userData" type:"structure"` // A new value for the attribute. Use only with the kernel, ramdisk, userData, @@ -153655,7 +154781,10 @@ func (s *ModifyTrafficMirrorFilterRuleInput) SetTrafficMirrorFilterRuleId(v stri type ModifyTrafficMirrorFilterRuleOutput struct { _ struct{} `type:"structure"` - // Modifies a Traffic Mirror rule. + // + // Tags are not returned for ModifyTrafficMirrorFilterRule. + // + // A Traffic Mirror rule. TrafficMirrorFilterRule *TrafficMirrorFilterRule `locationName:"trafficMirrorFilterRule" type:"structure"` } @@ -154465,8 +155594,8 @@ type ModifyVerifiedAccessEndpointInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). 
ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access endpoint. @@ -154675,8 +155804,8 @@ type ModifyVerifiedAccessEndpointPolicyInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -154820,8 +155949,8 @@ type ModifyVerifiedAccessGroupInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access group. @@ -154938,8 +156067,8 @@ type ModifyVerifiedAccessGroupPolicyInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -155083,8 +156212,8 @@ type ModifyVerifiedAccessInstanceInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access instance. @@ -155166,8 +156295,8 @@ type ModifyVerifiedAccessInstanceLoggingConfigurationInput struct { AccessLogs *VerifiedAccessLogOptions `type:"structure" required:"true"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). 
ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -155345,8 +156474,8 @@ type ModifyVerifiedAccessTrustProviderInput struct { _ struct{} `type:"structure"` // A unique, case-sensitive token that you provide to ensure idempotency of - // your modification request. For more information, see Ensuring Idempotency - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // your modification request. For more information, see Ensuring idempotency + // (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A description for the Verified Access trust provider. @@ -155680,7 +156809,7 @@ type ModifyVolumeInput struct { // * io2: 100 - 256,000 IOPS // // For io2 volumes, you can achieve up to 256,000 IOPS on instances built on - // the Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // the Nitro System (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html). // On other instances, you can achieve performance up to 32,000 IOPS. // // Default: The existing value is retained if you keep the same volume type. @@ -155688,7 +156817,7 @@ type ModifyVolumeInput struct { Iops *int64 `type:"integer"` // Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, - // you can attach the volume to up to 16 Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) + // you can attach the volume to up to 16 Nitro-based instances (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html) // in the same Availability Zone. This parameter is supported with io1 and io2 // volumes only. For more information, see Amazon EBS Multi-Attach (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volumes-multi.html) // in the Amazon EBS User Guide. @@ -158088,8 +159217,8 @@ type NatGateway struct { NatGatewayId *string `locationName:"natGatewayId" type:"string"` // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-gateways), + // contact Amazon Web Services Support. ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"` // The state of the NAT gateway. @@ -158313,7 +159442,7 @@ func (s *NatGatewayAddress) SetStatus(v string) *NatGatewayAddress { type NetworkAcl struct { _ struct{} `type:"structure"` - // Any associations between the network ACL and one or more subnets + // Any associations between the network ACL and your subnets Associations []*NetworkAclAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` // The entries (rules) in the network ACL. @@ -159431,7 +160560,7 @@ type NetworkInterface struct { // A security group connection tracking configuration that enables you to set // the timeout for connection tracking on an Elastic network interface. For // more information, see Connection tracking timeouts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts) - // in the Amazon Elastic Compute Cloud User Guide. 
+ // in the Amazon EC2 User Guide. ConnectionTrackingConfiguration *ConnectionTrackingConfiguration `locationName:"connectionTrackingConfiguration" type:"structure"` // Indicates whether a network interface with an IPv6 address is unreachable @@ -160605,13 +161734,13 @@ type OnDemandOptions struct { // credits, and, if you use surplus credits, your final cost might be higher // than what you specified for maxTotalPrice. For more information, see Surplus // credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` - // The minimum target capacity for On-Demand Instances in the fleet. If the - // minimum target capacity is not reached, the fleet launches no instances. + // The minimum target capacity for On-Demand Instances in the fleet. If this + // minimum capacity isn't reached, no instances are launched. // - // Supported only for fleets of type instant. + // Constraints: Maximum value of 1000. Supported only for fleets of type instant. // // At least one of the following must be specified: SingleAvailabilityZone | // SingleInstanceType @@ -160715,13 +161844,13 @@ type OnDemandOptionsRequest struct { // credits, and, if you use surplus credits, your final cost might be higher // than what you specified for MaxTotalPrice. For more information, see Surplus // credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. MaxTotalPrice *string `type:"string"` - // The minimum target capacity for On-Demand Instances in the fleet. If the - // minimum target capacity is not reached, the fleet launches no instances. + // The minimum target capacity for On-Demand Instances in the fleet. If this + // minimum capacity isn't reached, no instances are launched. // - // Supported only for fleets of type instant. + // Constraints: Maximum value of 1000. Supported only for fleets of type instant. // // At least one of the following must be specified: SingleAvailabilityZone | // SingleInstanceType @@ -163502,12 +164631,12 @@ type ProvisionIpamPoolCidrInput struct { Cidr *string `type:"string"` // A signed document that proves that you are authorized to bring a specified - // IP address range to Amazon using BYOIP. This option applies to public pools - // only. + // IP address range to Amazon using BYOIP. This option only applies to IPv4 + // and IPv6 pools in the public scope. CidrAuthorizationContext *IpamCidrAuthorizationContext `type:"structure"` // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // A check for whether you have the required permissions for the action without @@ -163516,6 +164645,10 @@ type ProvisionIpamPoolCidrInput struct { // is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // Verification token ID. This option only applies to IPv4 and IPv6 pools in + // the public scope. 
+ IpamExternalResourceVerificationTokenId *string `type:"string"` + // The ID of the IPAM pool to which you want to assign a CIDR. // // IpamPoolId is a required field @@ -163526,6 +164659,11 @@ type ProvisionIpamPoolCidrInput struct { // provisioning CIDRs to pools with source pools. Cannot be used to provision // BYOIP CIDRs to top-level pools. Either "NetmaskLength" or "Cidr" is required. NetmaskLength *int64 `type:"integer"` + + // The method for verifying control of a public IP address range. Defaults to + // remarks-x509 if not specified. This option only applies to IPv4 and IPv6 + // pools in the public scope. + VerificationMethod *string `type:"string" enum:"VerificationMethod"` } // String returns the string representation. @@ -163583,6 +164721,12 @@ func (s *ProvisionIpamPoolCidrInput) SetDryRun(v bool) *ProvisionIpamPoolCidrInp return s } +// SetIpamExternalResourceVerificationTokenId sets the IpamExternalResourceVerificationTokenId field's value. +func (s *ProvisionIpamPoolCidrInput) SetIpamExternalResourceVerificationTokenId(v string) *ProvisionIpamPoolCidrInput { + s.IpamExternalResourceVerificationTokenId = &v + return s +} + // SetIpamPoolId sets the IpamPoolId field's value. func (s *ProvisionIpamPoolCidrInput) SetIpamPoolId(v string) *ProvisionIpamPoolCidrInput { s.IpamPoolId = &v @@ -163595,6 +164739,12 @@ func (s *ProvisionIpamPoolCidrInput) SetNetmaskLength(v int64) *ProvisionIpamPoo return s } +// SetVerificationMethod sets the VerificationMethod field's value. +func (s *ProvisionIpamPoolCidrInput) SetVerificationMethod(v string) *ProvisionIpamPoolCidrInput { + s.VerificationMethod = &v + return s +} + type ProvisionIpamPoolCidrOutput struct { _ struct{} `type:"structure"` @@ -163646,6 +164796,13 @@ type ProvisionPublicIpv4PoolCidrInput struct { // NetmaskLength is a required field NetmaskLength *int64 `type:"integer" required:"true"` + // The Availability Zone (AZ) or Local Zone (LZ) network border group that the + // resource that the IP address is assigned to is in. Defaults to an AZ network + // border group. For more information on available Local Zones, see Local Zone + // availability (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) + // in the Amazon EC2 User Guide. + NetworkBorderGroup *string `type:"string"` + // The ID of the public IPv4 pool you would like to use for this CIDR. // // PoolId is a required field @@ -163707,6 +164864,12 @@ func (s *ProvisionPublicIpv4PoolCidrInput) SetNetmaskLength(v int64) *ProvisionP return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *ProvisionPublicIpv4PoolCidrInput) SetNetworkBorderGroup(v string) *ProvisionPublicIpv4PoolCidrInput { + s.NetworkBorderGroup = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *ProvisionPublicIpv4PoolCidrInput) SetPoolId(v string) *ProvisionPublicIpv4PoolCidrInput { s.PoolId = &v @@ -163754,34 +164917,24 @@ func (s *ProvisionPublicIpv4PoolCidrOutput) SetPoolId(v string) *ProvisionPublic } // Reserved. If you need to sustain traffic greater than the documented limits -// (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), -// contact us through the Support Center (https://console.aws.amazon.com/support/home?). +// (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-gateways), +// contact Amazon Web Services Support. type ProvisionedBandwidth struct { _ struct{} `type:"structure"` - // Reserved. 
If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // Reserved. ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp"` - // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // Reserved. Provisioned *string `locationName:"provisioned" type:"string"` - // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // Reserved. RequestTime *time.Time `locationName:"requestTime" type:"timestamp"` - // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // Reserved. Requested *string `locationName:"requested" type:"string"` - // Reserved. If you need to sustain traffic greater than the documented limits - // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), - // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + // Reserved. Status *string `locationName:"status" type:"string"` } @@ -164573,7 +165726,7 @@ type PurchaseReservedInstancesOfferingOutput struct { // The IDs of the purchased Reserved Instances. If your purchase crosses into // a discounted pricing tier, the final Reserved Instances IDs might change. // For more information, see Crossing pricing tiers (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-reserved-instances-application.html#crossing-pricing-tiers) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` } @@ -167774,17 +168927,17 @@ type RequestLaunchTemplateData struct { // type, platform, Availability Zone). CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationRequest `type:"structure"` - // The CPU options for the instance. For more information, see Optimizing CPU - // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) - // in the Amazon Elastic Compute Cloud User Guide. + // The CPU options for the instance. For more information, see Optimize CPU + // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // in the Amazon EC2 User Guide. CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` // The credit option for CPU usage of the instance. Valid only for T instances. CreditSpecification *CreditSpecificationRequest `type:"structure"` // Indicates whether to enable the instance for stop protection. For more information, - // see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection) - // in the Amazon Elastic Compute Cloud User Guide. + // see Enable stop protection for your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html) + // in the Amazon EC2 User Guide. 
DisableApiStop *bool `type:"boolean"` // If you set this parameter to true, you can't terminate the instance using @@ -167833,19 +168986,24 @@ type RequestLaunchTemplateData struct { // Indicates whether an instance is enabled for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). - // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) - // in the Amazon Elastic Compute Cloud User Guide. + // For more information, see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // in the Amazon EC2 User Guide. HibernationOptions *LaunchTemplateHibernationOptionsRequest `type:"structure"` // The name or Amazon Resource Name (ARN) of an IAM instance profile. IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"` - // The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, - // which will resolve to an AMI ID on launch. + // The ID of the AMI in the format ami-17characters00000. // - // Valid formats: + // Alternatively, you can specify a Systems Manager parameter, using one of + // the following formats. The Systems Manager parameter will resolve to an AMI + // ID on launch. + // + // To reference a public parameter: + // + // * resolve:ssm:public-parameter // - // * ami-17characters00000 + // To reference a parameter stored in the same account: // // * resolve:ssm:parameter-name // @@ -167853,15 +169011,26 @@ type RequestLaunchTemplateData struct { // // * resolve:ssm:parameter-name:label // - // * resolve:ssm:public-parameter + // To reference a parameter shared from another Amazon Web Services account: + // + // * resolve:ssm:parameter-ARN + // + // * resolve:ssm:parameter-ARN:version-number // - // Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager - // parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, - // you must specify the AMI ID. + // * resolve:ssm:parameter-ARN:label // // For more information, see Use a Systems Manager parameter instead of an AMI // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. + // + // If the launch template will be used for an EC2 Fleet or Spot Fleet, note + // the following: + // + // * Only EC2 Fleets of type instant support specifying a Systems Manager + // parameter. + // + // * For EC2 Fleets of type maintain or request, or for Spot Fleets, you + // must specify the AMI ID. ImageId *string `type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -167909,8 +169078,8 @@ type RequestLaunchTemplateData struct { // in the Amazon EC2 User Guide. InstanceRequirements *InstanceRequirementsRequest `type:"structure"` - // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) - // in the Amazon Elastic Compute Cloud User Guide. + // The instance type. For more information, see Amazon EC2 instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon EC2 User Guide. // // If you specify InstanceType, you can't specify InstanceRequirements. 
InstanceType *string `type:"string" enum:"InstanceType"` @@ -167919,7 +169088,7 @@ type RequestLaunchTemplateData struct { // // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more // information, see User provided kernels (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. KernelId *string `type:"string"` // The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) @@ -167937,7 +169106,7 @@ type RequestLaunchTemplateData struct { // The metadata options for the instance. For more information, see Instance // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. MetadataOptions *LaunchTemplateInstanceMetadataOptionsRequest `type:"structure"` // The monitoring for the instance. @@ -167957,7 +169126,7 @@ type RequestLaunchTemplateData struct { // // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more // information, see User provided kernels (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. RamDiskId *string `type:"string"` // The IDs of the security groups. @@ -167979,9 +169148,8 @@ type RequestLaunchTemplateData struct { // The user data to make available to the instance. You must provide base64-encoded // text. User data is limited to 16 KB. For more information, see Run commands - // on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) - // (Linux) or Work with instance user data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/instancedata-add-user-data.html) - // (Windows) in the Amazon Elastic Compute Cloud User Guide. + // on your Amazon EC2 instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // in the Amazon EC2 User Guide. // // If you are creating the launch template for use with Batch, the user data // must be provided in the MIME multi-part archive format (https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive). @@ -168363,8 +169531,9 @@ type RequestSpotInstancesInput struct { BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) - // in the Amazon EC2 User Guide for Linux Instances. + // of the request. For more information, see Ensuring idempotency in Amazon + // EC2 API requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // in the Amazon EC2 User Guide. ClientToken *string `locationName:"clientToken" type:"string"` // Checks whether you have the required permissions for the action, without @@ -170598,17 +171767,17 @@ type ResponseLaunchTemplateData struct { // Information about the Capacity Reservation targeting option. CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationResponse `locationName:"capacityReservationSpecification" type:"structure"` - // The CPU options for the instance. 
For more information, see Optimizing CPU + // The CPU options for the instance. For more information, see Optimize CPU // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. CpuOptions *LaunchTemplateCpuOptions `locationName:"cpuOptions" type:"structure"` // The credit option for CPU usage of the instance. CreditSpecification *CreditSpecification `locationName:"creditSpecification" type:"structure"` // Indicates whether the instance is enabled for stop protection. For more information, - // see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection) - // in the Amazon Elastic Compute Cloud User Guide. + // see Enable stop protection for your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html) + // in the Amazon EC2 User Guide. DisableApiStop *bool `locationName:"disableApiStop" type:"boolean"` // If set to true, indicates that the instance cannot be terminated using the @@ -170644,8 +171813,8 @@ type ResponseLaunchTemplateData struct { EnclaveOptions *LaunchTemplateEnclaveOptions `locationName:"enclaveOptions" type:"structure"` // Indicates whether an instance is configured for hibernation. For more information, - // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) - // in the Amazon Elastic Compute Cloud User Guide. + // see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // in the Amazon EC2 User Guide. HibernationOptions *LaunchTemplateHibernationOptions `locationName:"hibernationOptions" type:"structure"` // The IAM instance profile. @@ -170668,7 +171837,7 @@ type ResponseLaunchTemplateData struct { // // For more information, see Use a Systems Manager parameter instead of an AMI // ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. ImageId *string `locationName:"imageId" type:"string"` // Indicates whether an instance stops or terminates when you initiate shutdown @@ -170701,7 +171870,7 @@ type ResponseLaunchTemplateData struct { // The metadata options for the instance. For more information, see Instance // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide. MetadataOptions *LaunchTemplateInstanceMetadataOptions `locationName:"metadataOptions" type:"structure"` // The monitoring for the instance. @@ -172222,7 +173391,7 @@ func (s *Route) SetVpcPeeringConnectionId(v string) *Route { type RouteTable struct { _ struct{} `type:"structure"` - // The associations between the route table and one or more subnets or a gateway. + // The associations between the route table and your subnets or gateways. Associations []*RouteTableAssociation `locationName:"associationSet" locationNameList:"item" type:"list"` // The ID of the Amazon Web Services account that owns the route table. @@ -172653,7 +173822,7 @@ type RunInstancesInput struct { // Indicates whether an instance is enabled for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html). 
- // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // For more information, see Hibernate your Amazon EC2 instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon EC2 User Guide. // // You can't enable hibernation and Amazon Web Services Nitro Enclaves on the @@ -172679,7 +173848,7 @@ type RunInstancesInput struct { // InstanceInterruptionBehavior is set to either hibernate or stop. InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` - // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // The instance type. For more information, see Amazon EC2 instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon EC2 User Guide. InstanceType *string `type:"string" enum:"InstanceType"` @@ -172726,14 +173895,14 @@ type RunInstancesInput struct { // The maintenance and recovery options for the instance. MaintenanceOptions *InstanceMaintenanceOptionsRequest `type:"structure"` - // The maximum number of instances to launch. If you specify more instances - // than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches - // the largest possible number of instances above MinCount. + // The maximum number of instances to launch. If you specify a value that is + // more capacity than Amazon EC2 can launch in the target Availability Zone, + // Amazon EC2 launches the largest possible number of instances above the specified + // minimum count. // - // Constraints: Between 1 and the maximum number you're allowed for the specified - // instance type. For more information about the default limits, and how to - // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) - // in the Amazon EC2 FAQ. + // Constraints: Between 1 and the quota for the specified instance type for + // your account for this Region. For more information, see Amazon EC2 instance + // type quotas (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-instance-quotas.html). // // MaxCount is a required field MaxCount *int64 `type:"integer" required:"true"` @@ -172742,14 +173911,13 @@ type RunInstancesInput struct { // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` - // The minimum number of instances to launch. If you specify a minimum that - // is more instances than Amazon EC2 can launch in the target Availability Zone, - // Amazon EC2 launches no instances. + // The minimum number of instances to launch. If you specify a value that is + // more capacity than Amazon EC2 can provide in the target Availability Zone, + // Amazon EC2 does not launch any instances. // - // Constraints: Between 1 and the maximum number you're allowed for the specified - // instance type. For more information about the default limits, and how to - // request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) - // in the Amazon EC2 General FAQ. + // Constraints: Between 1 and the quota for the specified instance type for + // your account for this Region. 
For more information, see Amazon EC2 instance + // type quotas (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-instance-quotas.html). // // MinCount is a required field MinCount *int64 `type:"integer" required:"true"` @@ -172826,12 +173994,10 @@ type RunInstancesInput struct { // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The user data script to make available to the instance. For more information, - // see Run commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) - // and Run commands on your Windows instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html). - // If you are using a command line tool, base64-encoding is performed for you, - // and you can load the text from a file. Otherwise, you must provide base64-encoded - // text. User data is limited to 16 KB. + // The user data to make available to the instance. User data must be base64-encoded. + // Depending on the tool or SDK that you're using, the base64-encoding might + // be performed for you. For more information, see Work with instance user data + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html). // // UserData is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by RunInstancesInput's @@ -176215,8 +177381,8 @@ type Snapshot struct { // Indicates whether the snapshot is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key - // that was used to protect the volume encryption key for the parent volume. + // The Amazon Resource Name (ARN) of the KMS key that was used to protect the + // volume encryption key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // The ARN of the Outpost on which the snapshot is stored. For more information, @@ -176254,10 +177420,9 @@ type Snapshot struct { State *string `locationName:"status" type:"string" enum:"SnapshotState"` // Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy - // operation fails (for example, if the proper Key Management Service (KMS) - // permissions are not obtained) this field displays error state details to - // help you diagnose why the error occurred. This parameter is only returned - // by DescribeSnapshots. + // operation fails (for example, if the proper KMS permissions are not obtained) + // this field displays error state details to help you diagnose why the error + // occurred. This parameter is only returned by DescribeSnapshots. StateMessage *string `locationName:"statusMessage" type:"string"` // The storage tier in which the snapshot is stored. standard indicates that @@ -177056,7 +178221,7 @@ func (s *SnapshotTierStatus) SetVolumeId(v string) *SnapshotTierStatus { // The Spot Instance replacement strategy to use when Amazon EC2 emits a signal // that your Spot Instance is at an elevated risk of being interrupted. For // more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-capacity-rebalance.html) -// in the Amazon EC2 User Guide for Linux Instances. +// in the Amazon EC2 User Guide. 
type SpotCapacityRebalance struct { _ struct{} `type:"structure"` @@ -177290,6 +178455,12 @@ type SpotFleetLaunchSpecification struct { // If the target capacity divided by this value is not a whole number, Amazon // EC2 rounds the number of instances to the next whole number. If this value // is not specified, the default is 1. + // + // When specifying weights, the price used in the lowestPrice and priceCapacityOptimized + // allocation strategies is per unit hour (where the instance price is divided + // by the specified weight). However, if all the specified weights are above + // the requested TargetCapacity, resulting in only 1 instance being launched, + // the price used is per instance hour. WeightedCapacity *float64 `locationName:"weightedCapacity" type:"double"` } @@ -177576,7 +178747,10 @@ type SpotFleetRequestConfigData struct { // Spot Fleet requests instances from all of the Spot Instance pools that you // specify. // - // lowestPrice + // lowestPrice (not recommended) + // + // We don't recommend the lowestPrice allocation strategy because it has the + // highest risk of interruption for your Spot Instances. // // Spot Fleet requests instances from the lowest priced Spot Instance pool that // has available capacity. If the lowest priced pool doesn't have available @@ -177690,7 +178864,7 @@ type SpotFleetRequestConfigData struct { // for surplus credits, and, if you use surplus credits, your final cost might // be higher than what you specified for onDemandMaxTotalPrice. For more information, // see Surplus credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. OnDemandMaxTotalPrice *string `locationName:"onDemandMaxTotalPrice" type:"string"` // The number of On-Demand units to request. You can choose to set the target @@ -177722,7 +178896,7 @@ type SpotFleetRequestConfigData struct { // surplus credits, and, if you use surplus credits, your final cost might be // higher than what you specified for spotMaxTotalPrice. For more information, // see Surplus credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. SpotMaxTotalPrice *string `locationName:"spotMaxTotalPrice" type:"string"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -178089,7 +179263,7 @@ type SpotInstanceRequest struct { // The state of the Spot Instance request. Spot request status information helps // track your Spot Instance requests. For more information, see Spot request // status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-request-status.html) - // in the Amazon EC2 User Guide for Linux Instances. + // in the Amazon EC2 User Guide. State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` // The status code and status message describing the Spot Instance request. @@ -178297,7 +179471,7 @@ type SpotInstanceStatus struct { // The status code. For a list of status codes, see Spot request status codes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-request-status.html#spot-instance-request-status-understand) - // in the Amazon EC2 User Guide for Linux Instances. + // in the Amazon EC2 User Guide. 
Code *string `locationName:"code" type:"string"` // The description for the status code. @@ -178352,7 +179526,7 @@ type SpotMaintenanceStrategies struct { // The Spot Instance replacement strategy to use when Amazon EC2 emits a signal // that your Spot Instance is at an elevated risk of being interrupted. For // more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-capacity-rebalance.html) - // in the Amazon EC2 User Guide for Linux Instances. + // in the Amazon EC2 User Guide. CapacityRebalance *SpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` } @@ -178511,7 +179685,10 @@ type SpotOptions struct { // EC2 Fleet requests instances from all of the Spot Instance pools that you // specify. // - // lowest-price + // lowest-price (not recommended) + // + // We don't recommend the lowest-price allocation strategy because it has the + // highest risk of interruption for your Spot Instances. // // EC2 Fleet requests instances from the lowest priced Spot Instance pool that // has available capacity. If the lowest priced pool doesn't have available @@ -178564,13 +179741,13 @@ type SpotOptions struct { // credits, and, if you use surplus credits, your final cost might be higher // than what you specified for maxTotalPrice. For more information, see Surplus // credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` - // The minimum target capacity for Spot Instances in the fleet. If the minimum - // target capacity is not reached, the fleet launches no instances. + // The minimum target capacity for Spot Instances in the fleet. If this minimum + // capacity isn't reached, no instances are launched. // - // Supported only for fleets of type instant. + // Constraints: Maximum value of 1000. Supported only for fleets of type instant. // // At least one of the following must be specified: SingleAvailabilityZone | // SingleInstanceType @@ -178691,7 +179868,10 @@ type SpotOptionsRequest struct { // EC2 Fleet requests instances from all of the Spot Instance pools that you // specify. // - // lowest-price + // lowest-price (not recommended) + // + // We don't recommend the lowest-price allocation strategy because it has the + // highest risk of interruption for your Spot Instances. // // EC2 Fleet requests instances from the lowest priced Spot Instance pool that // has available capacity. If the lowest priced pool doesn't have available @@ -178744,13 +179924,13 @@ type SpotOptionsRequest struct { // credits, and, if you use surplus credits, your final cost might be higher // than what you specified for MaxTotalPrice. For more information, see Surplus // credits can incur charges (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances-unlimited-mode-concepts.html#unlimited-mode-surplus-credits) - // in the EC2 User Guide. + // in the Amazon EC2 User Guide. MaxTotalPrice *string `type:"string"` - // The minimum target capacity for Spot Instances in the fleet. If the minimum - // target capacity is not reached, the fleet launches no instances. + // The minimum target capacity for Spot Instances in the fleet. If this minimum + // capacity isn't reached, no instances are launched. // - // Supported only for fleets of type instant. 
+ // Constraints: Maximum value of 1000. Supported only for fleets of type instant. // // At least one of the following must be specified: SingleAvailabilityZone | // SingleInstanceType @@ -179285,7 +180465,7 @@ type StartNetworkInsightsAccessScopeAnalysisInput struct { _ struct{} `type:"structure"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -179396,7 +180576,7 @@ type StartNetworkInsightsAnalysisInput struct { AdditionalAccounts []*string `locationName:"AdditionalAccount" locationNameList:"item" type:"list"` // Unique, case-sensitive identifier that you provide to ensure the idempotency - // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // of the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` // Checks whether you have the required permissions for the action, without @@ -181764,6 +182944,9 @@ type TrafficMirrorFilterRule struct { // The source port range assigned to the Traffic Mirror rule. SourcePortRange *TrafficMirrorPortRange `locationName:"sourcePortRange" type:"structure"` + // Tags on Traffic Mirroring filter rules. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The traffic direction assigned to the Traffic Mirror rule. TrafficDirection *string `locationName:"trafficDirection" type:"string" enum:"TrafficDirection"` @@ -181840,6 +183023,12 @@ func (s *TrafficMirrorFilterRule) SetSourcePortRange(v *TrafficMirrorPortRange) return s } +// SetTags sets the Tags field's value. +func (s *TrafficMirrorFilterRule) SetTags(v []*Tag) *TrafficMirrorFilterRule { + s.Tags = v + return s +} + // SetTrafficDirection sets the TrafficDirection field's value. func (s *TrafficMirrorFilterRule) SetTrafficDirection(v string) *TrafficMirrorFilterRule { s.TrafficDirection = &v @@ -188336,8 +189525,8 @@ type Volume struct { // rate at which the volume accumulates I/O credits for bursting. Iops *int64 `locationName:"iops" type:"integer"` - // The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key - // that was used to protect the volume encryption key for the volume. + // The Amazon Resource Name (ARN) of the KMS key that was used to protect the + // volume encryption key for the volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // Indicates whether Amazon EBS Multi-Attach is enabled. @@ -188645,16 +189834,13 @@ func (s *VolumeDetail) SetSize(v int64) *VolumeDetail { } // Describes the modification status of an EBS volume. -// -// If the volume has never been modified, some element values will be null. type VolumeModification struct { _ struct{} `type:"structure"` // The modification completion or failure time. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // The current modification state. The modification state is null for unmodified - // volumes. + // The current modification state. 
ModificationState *string `locationName:"modificationState" type:"string" enum:"VolumeModificationState"` // The original IOPS rate of the volume. @@ -193633,6 +194819,9 @@ func HostRecovery_Values() []string { } const ( + // HostTenancyDefault is a HostTenancy enum value + HostTenancyDefault = "default" + // HostTenancyDedicated is a HostTenancy enum value HostTenancyDedicated = "dedicated" @@ -193643,6 +194832,7 @@ const ( // HostTenancy_Values returns all elements of the HostTenancy enum func HostTenancy_Values() []string { return []string{ + HostTenancyDefault, HostTenancyDedicated, HostTenancyHost, } @@ -196624,6 +197814,78 @@ const ( // InstanceTypeGr68xlarge is a InstanceType enum value InstanceTypeGr68xlarge = "gr6.8xlarge" + + // InstanceTypeC7iFlexLarge is a InstanceType enum value + InstanceTypeC7iFlexLarge = "c7i-flex.large" + + // InstanceTypeC7iFlexXlarge is a InstanceType enum value + InstanceTypeC7iFlexXlarge = "c7i-flex.xlarge" + + // InstanceTypeC7iFlex2xlarge is a InstanceType enum value + InstanceTypeC7iFlex2xlarge = "c7i-flex.2xlarge" + + // InstanceTypeC7iFlex4xlarge is a InstanceType enum value + InstanceTypeC7iFlex4xlarge = "c7i-flex.4xlarge" + + // InstanceTypeC7iFlex8xlarge is a InstanceType enum value + InstanceTypeC7iFlex8xlarge = "c7i-flex.8xlarge" + + // InstanceTypeU7i12tb224xlarge is a InstanceType enum value + InstanceTypeU7i12tb224xlarge = "u7i-12tb.224xlarge" + + // InstanceTypeU7in16tb224xlarge is a InstanceType enum value + InstanceTypeU7in16tb224xlarge = "u7in-16tb.224xlarge" + + // InstanceTypeU7in24tb224xlarge is a InstanceType enum value + InstanceTypeU7in24tb224xlarge = "u7in-24tb.224xlarge" + + // InstanceTypeU7in32tb224xlarge is a InstanceType enum value + InstanceTypeU7in32tb224xlarge = "u7in-32tb.224xlarge" + + // InstanceTypeU7ib12tb224xlarge is a InstanceType enum value + InstanceTypeU7ib12tb224xlarge = "u7ib-12tb.224xlarge" + + // InstanceTypeC7gnMetal is a InstanceType enum value + InstanceTypeC7gnMetal = "c7gn.metal" + + // InstanceTypeR8gMedium is a InstanceType enum value + InstanceTypeR8gMedium = "r8g.medium" + + // InstanceTypeR8gLarge is a InstanceType enum value + InstanceTypeR8gLarge = "r8g.large" + + // InstanceTypeR8gXlarge is a InstanceType enum value + InstanceTypeR8gXlarge = "r8g.xlarge" + + // InstanceTypeR8g2xlarge is a InstanceType enum value + InstanceTypeR8g2xlarge = "r8g.2xlarge" + + // InstanceTypeR8g4xlarge is a InstanceType enum value + InstanceTypeR8g4xlarge = "r8g.4xlarge" + + // InstanceTypeR8g8xlarge is a InstanceType enum value + InstanceTypeR8g8xlarge = "r8g.8xlarge" + + // InstanceTypeR8g12xlarge is a InstanceType enum value + InstanceTypeR8g12xlarge = "r8g.12xlarge" + + // InstanceTypeR8g16xlarge is a InstanceType enum value + InstanceTypeR8g16xlarge = "r8g.16xlarge" + + // InstanceTypeR8g24xlarge is a InstanceType enum value + InstanceTypeR8g24xlarge = "r8g.24xlarge" + + // InstanceTypeR8g48xlarge is a InstanceType enum value + InstanceTypeR8g48xlarge = "r8g.48xlarge" + + // InstanceTypeR8gMetal24xl is a InstanceType enum value + InstanceTypeR8gMetal24xl = "r8g.metal-24xl" + + // InstanceTypeR8gMetal48xl is a InstanceType enum value + InstanceTypeR8gMetal48xl = "r8g.metal-48xl" + + // InstanceTypeMac2M1ultraMetal is a InstanceType enum value + InstanceTypeMac2M1ultraMetal = "mac2-m1ultra.metal" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -197425,6 +198687,30 @@ func InstanceType_Values() []string { InstanceTypeG648xlarge, InstanceTypeGr64xlarge, InstanceTypeGr68xlarge, + 
InstanceTypeC7iFlexLarge, + InstanceTypeC7iFlexXlarge, + InstanceTypeC7iFlex2xlarge, + InstanceTypeC7iFlex4xlarge, + InstanceTypeC7iFlex8xlarge, + InstanceTypeU7i12tb224xlarge, + InstanceTypeU7in16tb224xlarge, + InstanceTypeU7in24tb224xlarge, + InstanceTypeU7in32tb224xlarge, + InstanceTypeU7ib12tb224xlarge, + InstanceTypeC7gnMetal, + InstanceTypeR8gMedium, + InstanceTypeR8gLarge, + InstanceTypeR8gXlarge, + InstanceTypeR8g2xlarge, + InstanceTypeR8g4xlarge, + InstanceTypeR8g8xlarge, + InstanceTypeR8g12xlarge, + InstanceTypeR8g16xlarge, + InstanceTypeR8g24xlarge, + InstanceTypeR8g48xlarge, + InstanceTypeR8gMetal24xl, + InstanceTypeR8gMetal48xl, + InstanceTypeMac2M1ultraMetal, } } @@ -197584,6 +198870,38 @@ func IpamDiscoveryFailureCode_Values() []string { } } +const ( + // IpamExternalResourceVerificationTokenStateCreateInProgress is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateCreateInProgress = "create-in-progress" + + // IpamExternalResourceVerificationTokenStateCreateComplete is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateCreateComplete = "create-complete" + + // IpamExternalResourceVerificationTokenStateCreateFailed is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateCreateFailed = "create-failed" + + // IpamExternalResourceVerificationTokenStateDeleteInProgress is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateDeleteInProgress = "delete-in-progress" + + // IpamExternalResourceVerificationTokenStateDeleteComplete is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateDeleteComplete = "delete-complete" + + // IpamExternalResourceVerificationTokenStateDeleteFailed is a IpamExternalResourceVerificationTokenState enum value + IpamExternalResourceVerificationTokenStateDeleteFailed = "delete-failed" +) + +// IpamExternalResourceVerificationTokenState_Values returns all elements of the IpamExternalResourceVerificationTokenState enum +func IpamExternalResourceVerificationTokenState_Values() []string { + return []string{ + IpamExternalResourceVerificationTokenStateCreateInProgress, + IpamExternalResourceVerificationTokenStateCreateComplete, + IpamExternalResourceVerificationTokenStateCreateFailed, + IpamExternalResourceVerificationTokenStateDeleteInProgress, + IpamExternalResourceVerificationTokenStateDeleteComplete, + IpamExternalResourceVerificationTokenStateDeleteFailed, + } +} + const ( // IpamManagementStateManaged is a IpamManagementState enum value IpamManagementStateManaged = "managed" @@ -197604,6 +198922,22 @@ func IpamManagementState_Values() []string { } } +const ( + // IpamNetworkInterfaceAttachmentStatusAvailable is a IpamNetworkInterfaceAttachmentStatus enum value + IpamNetworkInterfaceAttachmentStatusAvailable = "available" + + // IpamNetworkInterfaceAttachmentStatusInUse is a IpamNetworkInterfaceAttachmentStatus enum value + IpamNetworkInterfaceAttachmentStatusInUse = "in-use" +) + +// IpamNetworkInterfaceAttachmentStatus_Values returns all elements of the IpamNetworkInterfaceAttachmentStatus enum +func IpamNetworkInterfaceAttachmentStatus_Values() []string { + return []string{ + IpamNetworkInterfaceAttachmentStatusAvailable, + IpamNetworkInterfaceAttachmentStatusInUse, + } +} + const ( // IpamOverlapStatusOverlapping is a IpamOverlapStatus enum value IpamOverlapStatusOverlapping = "overlapping" @@ -199862,6 +201196,9 @@ 
const ( // ResourceTypeInstanceConnectEndpoint is a ResourceType enum value ResourceTypeInstanceConnectEndpoint = "instance-connect-endpoint" + + // ResourceTypeIpamExternalResourceVerificationToken is a ResourceType enum value + ResourceTypeIpamExternalResourceVerificationToken = "ipam-external-resource-verification-token" ) // ResourceType_Values returns all elements of the ResourceType enum @@ -199953,6 +201290,7 @@ func ResourceType_Values() []string { ResourceTypeIpamResourceDiscovery, ResourceTypeIpamResourceDiscoveryAssociation, ResourceTypeInstanceConnectEndpoint, + ResourceTypeIpamExternalResourceVerificationToken, } } @@ -200736,6 +202074,22 @@ func TieringOperationStatus_Values() []string { } } +const ( + // TokenStateValid is a TokenState enum value + TokenStateValid = "valid" + + // TokenStateExpired is a TokenState enum value + TokenStateExpired = "expired" +) + +// TokenState_Values returns all elements of the TokenState enum +func TokenState_Values() []string { + return []string{ + TokenStateValid, + TokenStateExpired, + } +} + const ( // TpmSupportValuesV20 is a TpmSupportValues enum value TpmSupportValuesV20 = "v2.0" @@ -201424,6 +202778,22 @@ func UserTrustProviderType_Values() []string { } } +const ( + // VerificationMethodRemarksX509 is a VerificationMethod enum value + VerificationMethodRemarksX509 = "remarks-x509" + + // VerificationMethodDnsToken is a VerificationMethod enum value + VerificationMethodDnsToken = "dns-token" +) + +// VerificationMethod_Values returns all elements of the VerificationMethod enum +func VerificationMethod_Values() []string { + return []string{ + VerificationMethodRemarksX509, + VerificationMethodDnsToken, + } +} + const ( // VerifiedAccessEndpointAttachmentTypeVpc is a VerifiedAccessEndpointAttachmentType enum value VerifiedAccessEndpointAttachmentTypeVpc = "vpc" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index 04f6c811b..827bd5194 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req // // Creates and returns access and refresh tokens for clients and applications // that are authenticated using IAM entities. The access token can be used to -// fetch short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques // Indicates that an error from the service occurred while trying to process // a request. // +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { req, out := c.RegisterClientRequest(input) @@ -619,6 +626,15 @@ type CreateTokenInput struct { // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the // result of the StartDeviceAuthorization API. @@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + // SetDeviceCode sets the DeviceCode field's value. func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { s.DeviceCode = &v @@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenOutput's @@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct { // persisted in the Authorization Code GrantOptions for the application. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending // on the grant type that you want: @@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + // SetGrantType sets the GrantType field's value. func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { s.GrantType = &v @@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith type CreateTokenWithIAMOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. 
+ // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's @@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that something is wrong with the input to the request. For example, // a required parameter might be missing or out of range. type InvalidRequestException struct { @@ -1731,6 +1842,25 @@ type RegisterClientInput struct { // ClientType is a required field ClientType *string `locationName:"clientType" type:"string" required:"true"` + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. 
+ EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. + GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []*string `locationName:"scopes" type:"list"` @@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { return s } +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + // SetScopes sets the Scopes field's value. func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { s.Scopes = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index e6242e492..cadf4584d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -57,6 +57,13 @@ const ( // makes a CreateToken request with an invalid grant type. ErrCodeInvalidGrantException = "InvalidGrantException" + // ErrCodeInvalidRedirectUriException for service response error code + // "InvalidRedirectUriException". + // + // Indicates that one or more redirect URI in the request is not supported for + // this operation. + ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". 
// @@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientException": newErrorInvalidClientException, "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRedirectUriException": newErrorInvalidRedirectUriException, "InvalidRequestException": newErrorInvalidRequestException, "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. 
+1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. 
+ mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. 
Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. 
If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. 
- events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. 
-// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). 
+ if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. 
+ // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. 
- if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
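The reworked kqueue backend above still delivers everything through the Events and Errors channels that fsnotify.go now hands to newBackend, so nothing changes for consumers of the public API. A minimal consumer sketch (the watched path is only an example):

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Any existing directory works; /tmp is only an example.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // Both channels are closed once Close() is called.
				return
			}
			log.Printf("%s %q", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}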
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
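With this change backend_other.go reports the lack of a backend through the same newBackend constructor, so an unsupported platform surfaces as an error from NewWatcher. A hedged sketch of degrading gracefully instead of aborting (newWatcherOrNil is a hypothetical helper, not part of the library):

package watchutil

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// newWatcherOrNil returns nil when no fsnotify backend exists for this
// platform, letting callers fall back to periodic rescans instead of failing.
func newWatcherOrNil() *fsnotify.Watcher {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Printf("file watching unavailable: %v", err)
		return nil
	}
	return w
}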
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
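Op is a bitmask and, as the comments above note, several operations can be set on one event, so it should be tested with Has rather than compared with ==. A small sketch of dispatching on it (handle is a hypothetical callback name):

package watchutil

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// handle dispatches on the Op bitmask with Has() instead of comparing with ==.
func handle(ev fsnotify.Event) {
	switch {
	case ev.Has(fsnotify.Create):
		log.Println("created:", ev.Name)
	case ev.Has(fsnotify.Write):
		log.Println("written:", ev.Name)
	case ev.Has(fsnotify.Remove), ev.Has(fsnotify.Rename):
		log.Println("gone:", ev.Name)
	case ev.Has(fsnotify.Chmod):
		log.Println("attributes changed:", ev.Name)
	}
}

A rename arrives as a Rename event for the old path followed by a Create for the new one, which is why both Remove and Rename are treated as "gone" here.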
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
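The "Watching files" guidance above (watch the parent directory and filter on Event.Name) can be summarised in a short sketch; watchFile and the /etc/hosts path are illustrative only:

package watchutil

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchFile watches the directory containing target and reports only the
// events for target itself, so atomic rename-into-place updates keep working.
func watchFile(w *fsnotify.Watcher, target string) error {
	if err := w.Add(filepath.Dir(target)); err != nil {
		return err
	}
	for ev := range w.Events {
		if filepath.Clean(ev.Name) != filepath.Clean(target) {
			continue // An unrelated file in the same directory changed.
		}
		if ev.Has(fsnotify.Create) || ev.Has(fsnotify.Write) {
			log.Println("updated:", ev.Name)
		}
	}
	return nil
}

Called as watchFile(w, "/etc/hosts") after constructing w with NewWatcher; the loop ends when Close() closes the Events channel.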
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 000000000..928319fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 000000000..a72c64954 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index fdff3fdb4..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. 
-func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. 
-type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. - -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd85..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. 
-func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3e..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. - -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) } - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. -func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f80760..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. 
- -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 250c81e8c..16ae66faa 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,8 +20,8 @@ import ( "os/exec" "strings" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" @@ -33,7 +33,7 @@ type ExtensionHandler struct { } // CallExtension calls a binary extension handler. 
-func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *anypb.Any, err error) { if context == nil || context.ExtensionHandlers == nil { return false, nil, nil } @@ -50,7 +50,7 @@ func CallExtension(context *Context, in *yaml.Node, extensionName string) (handl return handled, response, err } -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*anypb.Any, error) { if extensionHandlers.Name != "" { yamlData, _ := yaml.Marshal(in) request := &extensions.ExtensionHandlerRequest{ diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go index ec8afd009..0768163e5 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extensions.go +++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go @@ -19,8 +19,8 @@ import ( "log" "os" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" ) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) @@ -54,7 +54,7 @@ func Main(handler extensionHandler) { response.Errors = append(response.Errors, err.Error()) } else if handled { response.Handled = true - response.Value, err = ptypes.MarshalAny(output) + response.Value, err = anypb.New(output) if err != nil { response.Errors = append(response.Errors, err.Error()) } diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go new file mode 100644 index 000000000..5c2b63e26 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go @@ -0,0 +1,182 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.19.3 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + 
file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto new file mode 100644 index 000000000..09ee0aac5 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -0,0 +1,56 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/descriptor.proto"; +import "openapiv3/OpenAPIv3.proto"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. 
+option objc_class_prefix = "OAS"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 860bb304c..8ce9d3cf3 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -122,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -237,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -384,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index eee0132e7..ba4d74640 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 5551eb0bf..f47a24390 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -39,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + DocURL string DropFrames string KeepFrames string @@ -53,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -555,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s 
%s", pt.Type, pt.Unit)) } @@ -844,7 +849,7 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" diff --git a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md index 41f9017aa..efe2d9f6a 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md @@ -1,3 +1,16 @@ +## v2.2.0 (2024-10-18) + +* [GH-3176](https://github.com/gophercloud/gophercloud/pull/3176) [v2] [containerinfra]: add "MasterLBEnabled" in Cluster results +* [GH-3181](https://github.com/gophercloud/gophercloud/pull/3181) [v2] octavia: add new options to health monitors +* [GH-3182](https://github.com/gophercloud/gophercloud/pull/3182) [v2] octavia: add new security options to pools and listeners +* [GH-3195](https://github.com/gophercloud/gophercloud/pull/3195) [v2] [core]: handle empty response body +* [GH-3196](https://github.com/gophercloud/gophercloud/pull/3196) [v2] [fwaas_v2]: proper ParseResponse handling +* [GH-3198](https://github.com/gophercloud/gophercloud/pull/3198) [v2] compute: Fix expected and actual test results +* [GH-3199](https://github.com/gophercloud/gophercloud/pull/3199) [v2] [octavia] add an ability to filter flavors and flavorprofiles +* [GH-3214](https://github.com/gophercloud/gophercloud/pull/3214) [v2] [manila] add scheduler_hints to the shares CreateOpts +* [GH-3215](https://github.com/gophercloud/gophercloud/pull/3215) [v2] [manila] add share_group_id to share's CreateOpts +* [GH-3219](https://github.com/gophercloud/gophercloud/pull/3219) [v2] docs: Remove outdated godoc + ## v2.1.1 (2024-09-18) * [GH-3161](https://github.com/gophercloud/gophercloud/pull/3161) [v2] fix: create security group rule with any protocol diff --git a/vendor/github.com/gophercloud/gophercloud/v2/README.md b/vendor/github.com/gophercloud/gophercloud/v2/README.md index e9ba39bb7..ca47f5b0b 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/README.md +++ b/vendor/github.com/gophercloud/gophercloud/v2/README.md @@ -1,8 +1,6 @@ # Gophercloud: an OpenStack SDK for Go [![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) -[Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud/v2) - Gophercloud is a Go SDK for OpenStack. Join us on kubernetes slack, on [#gophercloud](https://kubernetes.slack.com/archives/C05G4NJ6P6X). Visit [slack.k8s.io](https://slack.k8s.io) for an invitation. diff --git a/vendor/github.com/gophercloud/gophercloud/v2/doc.go b/vendor/github.com/gophercloud/gophercloud/v2/doc.go deleted file mode 100644 index a755ecb18..000000000 --- a/vendor/github.com/gophercloud/gophercloud/v2/doc.go +++ /dev/null @@ -1,148 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. 
- -# Authenticating with Providers - -Provider structs represent the cloud providers that offer and manage a -collection of services. You will generally want to create one Provider -client per OpenStack cloud. - - It is now recommended to use the `clientconfig` package found at - https://github.com/gophercloud/utils/tree/master/openstack/clientconfig - for all authentication purposes. - - The below documentation is still relevant. clientconfig simply implements - the below and presents it in an easier and more flexible way. - -Use your OpenStack credentials to create a Provider client. The -IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in -information provided by the cloud operator. Additionally, the cloud may refer to -TenantID or TenantName as project_id and project_name. Credentials are -specified like so: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(context.TODO(), opts) - -You can authenticate with a token by doing: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - TokenID: "{token_id}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(context.TODO(), opts) - -You may also use the openstack.AuthOptionsFromEnv() helper function. This -function reads in standard environment variables frequently found in an -OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" -instead of "project". - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(context.TODO(), opts) - -# Service Clients - -Service structs are specific to a provider and handle all of the logic and -operations for a particular OpenStack service. Examples of services include: -Compute, Object Storage, Block Storage. In order to define one, you need to -pass in the parent provider, like so: - - opts := gophercloud.EndpointOpts{Region: "RegionOne"} - - client, err := openstack.NewComputeV2(provider, opts) - -# Resources - -Resource structs are the domain models that services make use of in order -to work with and represent the state of API resources: - - server, err := servers.Get(context.TODO(), client, "{serverId}").Extract() - -Intermediate Result structs are returned for API operations, which allow -generic access to the HTTP headers, response body, and any errors associated -with the network transaction. To turn a result into a usable resource struct, -you must call the Extract method which is chained to the response, or an -Extract function from an applicable extension: - - result := servers.Get(context.TODO(), client, "{serverId}") - - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) - -All requests that enumerate a collection return a Pager struct that is used to -iterate through the results one page at a time. Use the EachPage method on that -Pager to handle each successive Page in a closure, then use the appropriate -extraction method from that request's package to interpret that Page as a slice -of results: - - err := servers.List(client, nil).EachPage(context.TODO(), func (_ context.Context, page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - - // Handle the []servers.Server slice. 
- - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) - -If you want to obtain the entire collection of pages without doing any -intermediary processing on each page, you can use the AllPages method: - - allPages, err := servers.List(client, nil).AllPages(context.TODO()) - allServers, err := servers.ExtractServers(allPages) - -This top-level package contains utility functions and data types that are used -throughout the provider and service packages. Of particular note for end users -are the AuthOptions and EndpointOpts structs. - -An example retry backoff function, which respects the 429 HTTP response code and a "Retry-After" header: - - endpoint := "http://localhost:5000" - provider, err := openstack.NewClient(endpoint) - if err != nil { - panic(err) - } - provider.MaxBackoffRetries = 3 // max three retries - provider.RetryBackoffFunc = func(ctx context.Context, respErr *ErrUnexpectedResponseCode, e error, retries uint) error { - retryAfter := respErr.ResponseHeader.Get("Retry-After") - if retryAfter == "" { - return e - } - - var sleep time.Duration - - // Parse delay seconds or HTTP date - if v, err := strconv.ParseUint(retryAfter, 10, 32); err == nil { - sleep = time.Duration(v) * time.Second - } else if v, err := time.Parse(http.TimeFormat, retryAfter); err == nil { - sleep = time.Until(v) - } else { - return e - } - - if ctx != nil { - select { - case <-time.After(sleep): - case <-ctx.Done(): - return e - } - } else { - time.Sleep(sleep) - } - - return nil - } -*/ -package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go index e2cd3c2da..007266f63 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go @@ -13,7 +13,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. 
const ( - DefaultUserAgent = "gophercloud/v2.1.1" + DefaultUserAgent = "gophercloud/v2.2.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..d31b37815 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..4528059ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +version: 2 + +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + version_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 000000000..87d557477 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 000000000..de264c85a --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,721 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. 
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window 
encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
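As a minimal sketch of the "synchronous" mode described above (not part of the vendored README itself, and the surrounding program is purely illustrative): setting the encoder and decoder concurrency to 1 via the public `WithEncoderConcurrency` / `WithDecoderConcurrency` options is how a caller would opt into goroutine-free stream operation.

```go
// Illustrative sketch only: select single-goroutine ("synchronous")
// zstd stream operation by setting concurrency to 1.
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// With concurrency 1 the encoder operates synchronously and
	// spawns no background goroutines.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}

	// The same option exists for the decoder.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	// Re-compress whatever zstd stream arrives on stdin (illustrative only).
	if _, err := io.Copy(enc, dec); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```

Because no goroutines are left running once the stream finishes, encoders and decoders configured this way can be pooled and garbage collected as the note above describes.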
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                               | Documentation
+|--------------------|------------------------------------------|--------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`     | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`     | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip`      | `github.com/klauspost/compress/zip`      | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate`   | `github.com/klauspost/compress/flate`    | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use their godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly from CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer; stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter); a minimal sketch of the direct route is included further below.
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
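+As referenced above, here is a minimal sketch of the direct deflate route. It assumes only `flate.NewStatelessWriter` from the documentation link; the helper function and its name are illustrative, not a prescribed pattern, and real code should also check the error from `Close`.
+
+```go
+package main
+
+import (
+	"bufio"
+	"io"
+	"os"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// compressStateless writes src to dst as raw deflate without keeping
+// encoder state between Write calls. The bufio.Writer batches small
+// writes so the stateless encoder sees reasonably sized blocks.
+func compressStateless(dst io.Writer, src io.Reader) error {
+	fw := flate.NewStatelessWriter(dst)
+	defer fw.Close() // emits the final deflate block; check its error in real code
+
+	bw := bufio.NewWriterSize(fw, 4096)
+	if _, err := io.Copy(bw, src); err != nil {
+		return err
+	}
+	return bw.Flush()
+}
+
+func main() {
+	// Example: compress stdin to stdout.
+	if err := compressStateless(os.Stdout, os.Stdin); err != nil {
+		panic(err)
+	}
+}
+```
+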
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we have looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences.
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. A short usage sketch is shown below.
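+As a concrete illustration of the mode described above, the sketch below enables Huffman-only encoding through the gzip drop-in package. It assumes the `gzip.HuffmanOnly` level constant (mirroring `flate.HuffmanOnly`), and the helper name is purely illustrative.
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+// huffmanOnlyCompress gzips src to dst using Huffman-only encoding:
+// no match searching against previous data, only entropy coding of bytes.
+func huffmanOnlyCompress(dst io.Writer, src io.Reader) error {
+	gw, err := gzip.NewWriterLevel(dst, gzip.HuffmanOnly)
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(gw, src); err != nil {
+		gw.Close()
+		return err
+	}
+	return gw.Close() // flushes the final block and writes the gzip trailer
+}
+
+func main() {
+	if err := huffmanOnlyCompress(os.Stdout, os.Stdin); err != nil {
+		panic(err)
+	}
+}
+```
+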
+For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 000000000..ca6685e2b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, given certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 000000000..ea5a692d5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 000000000..ea7324da6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. 
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At some point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
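+
+# Usage sketch
+
+A rough, non-authoritative sketch of the block API described above, re-using one `Scratch` for both directions; how `ErrIncompressible`/`ErrUseRLE` are handled here is an illustrative choice, not a requirement.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding example "), 100)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		fmt.Printf("compressed %d -> %d bytes\n", len(in), len(comp))
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Expected outcomes: store the block uncompressed (or as RLE) and record that.
+		fmt.Println("block not FSE-compressed:", err)
+		return
+	default:
+		panic(err)
+	}
+
+	// The returned slice lives in the scratch's Out buffer; copy it, since the
+	// same buffer is re-used for decompression output.
+	comp = append([]byte(nil), comp...)
+
+	// Decompress needs the exact compressed block back. Note there are no
+	// integrity checks, so a nil error does not prove the data is intact.
+	out, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	if !bytes.Equal(in, out) {
+		panic("round trip mismatch")
+	}
+	fmt.Println("round trip ok")
+}
+```
+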
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 000000000..f65eb3909 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 000000000..e82fa3bb7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 000000000..abade2d60 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 000000000..074018d8f --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint(bitStream&0xF) + minTablelog // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units) and achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
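+
+As a rough, illustrative sketch (not part of the upstream documentation; the sample data, the explicit
+`MaxDecodedSize` assignment and the error handling are arbitrary choices), a single-stream round trip using
+`Compress1X`, `ReadTable` and `Decompress1X` could look like this:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	// Skewed byte distributions are what entropy coding compresses well.
+	in := bytes.Repeat([]byte("aaaabbc"), 100)
+
+	var s huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &s)
+	switch err {
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		// Expected outcomes for some inputs: store the block raw or as RLE instead.
+		fmt.Println("not entropy coded:", err)
+		return
+	case nil:
+	default:
+		panic(err)
+	}
+
+	// The compressed block starts with the table; ReadTable consumes it and
+	// returns the data part, which is handed to the decompressor.
+	d, remain, err := huff0.ReadTable(comp, nil)
+	if err != nil {
+		panic(err)
+	}
+	d.MaxDecodedSize = len(in) // the original size is known in this example
+	out, err := d.Decompress1X(remain)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(in, out))
+}
+```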
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do, however, note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first step of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 000000000..e36d9742f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..0ebc9aaac --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 000000000..84aa3d12f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..0f56b02d7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
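For reference (editorial aside, not part of the vendored file): a 4X-encoded block begins with a six-byte "jump table" holding three little-endian uint16 lengths for the first three streams; the fourth stream occupies whatever remains. A minimal sketch of splitting the streams, mirroring the parsing above (the helper name is hypothetical, imports of "errors" elided):

	func splitStreams(src []byte) (streams [4][]byte, err error) {
		if len(src) < 6+4 {
			return streams, errors.New("input too small")
		}
		start := 6
		for i := 0; i < 3; i++ {
			length := int(src[i*2]) | int(src[i*2+1])<<8
			if start+length >= len(src) {
				return streams, errors.New("truncated input (or invalid offset)")
			}
			streams[i] = src[start : start+length]
			start += length
		}
		streams[3] = src[start:]
		return streams, nil
	}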
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..ba7e8e6b0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 000000000..c4c7ab2d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..908c17de6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 000000000..77ecd68e0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
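To make the package-level flow concrete (editorial sketch, not part of the vendored file): a typical round trip compresses with a fresh Scratch, so the table is emitted in front of the compressed data, and decodes by reading that table back. The Compress1X entry point and its (out, reUsed, err) return shape are assumed from the package's compression side; the helper and variable names are illustrative only.

	func roundTrip(in []byte) ([]byte, error) {
		s := &huff0.Scratch{}
		// With a fresh Scratch the table cannot be reused, so comp starts with the table.
		// ErrIncompressible and ErrUseRLE signal "store the input differently", not failure.
		comp, _, err := huff0.Compress1X(in, s)
		if err != nil {
			return nil, err
		}
		d, data, err := huff0.ReadTable(comp, nil)
		if err != nil {
			return nil, err
		}
		return d.Decoder().Decompress1X(make([]byte, 0, len(in)), data)
	}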
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax = 11
+	tableLogDefault = 11
+	minTablelog = 5
+	huffNodesLen = 512
+
+	// BlockSizeMax is the maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the maximum allowed size (MaxDecodedSize).
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	srcLen int
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log 2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess)
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen uint16 // Length of active part of the symbol table.
+	maxCount int // count of the most probable symbol
+	clearCount bool // clear count
+	actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
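The cpuinfo helpers above live under `internal/` and therefore cannot be imported from outside this module. As a purely illustrative sketch of the intended pattern (not part of the vendored sources), a caller inside the module can gate optimized paths on the detected features and use the restore function returned by `DisableBMI2` to exercise the generic fallback, for example in tests:

```Go
package main

import (
	"fmt"

	"github.com/klauspost/compress/internal/cpuinfo"
)

func main() {
	if cpuinfo.HasBMI() {
		fmt.Println("BMI1+BMI2 detected: assembly fast paths may be selected")
	} else {
		fmt.Println("BMI not available: generic Go paths are used")
	}

	// Temporarily report BMI2 as absent (useful when testing fallbacks);
	// the returned function restores the originally detected value.
	restore := cpuinfo.DisableBMI2()
	fmt.Println("HasBMI2 while disabled:", cpuinfo.HasBMI2())
	restore()
}
```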
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 000000000..40796a49d --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 000000000..77395a6b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 000000000..13c6040a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 000000000..2754bac6f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 000000000..34d01f4aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..5a4412f90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 000000000..92e2347bb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. 
Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
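For reference, the named levels in the legend above are selected with the `WithEncoderLevel` option; a minimal sketch, assuming the `Speed*` level constants exposed by this package (`SpeedFastest`, `SpeedDefault`, `SpeedBetterCompression`, `SpeedBestCompression`):

```Go
import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// newLeveledWriter returns an encoder roughly corresponding to the
// "best" (zskp level 4) rows in the tables below.
func newLeveledWriter(out io.Writer) (*zstd.Encoder, error) {
	return zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
}
```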
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+ return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The matching dictionary will be used automatically for data that references it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used, as in the sketch below.
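+
+As a minimal sketch of that (reusing the package-level `decoder` from the example above; the
+`expectedSize` parameter is purely illustrative and not part of the API), supplying a pre-allocated
+destination could look like this:
+
+```Go
+// DecompressInto decodes src and appends the output to a destination
+// slice that already has the expected capacity, so the decoder should
+// not need to allocate or grow it while decoding.
+func DecompressInto(src []byte, expectedSize int) ([]byte, error) {
+ dst := make([]byte, 0, expectedSize)
+ return decoder.DecodeAll(src, dst)
+}
+```
+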
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity, as in the sketch above.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the rest are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2022, but this may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
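+
+As a rough sketch of per-archive registration (the `ZipCompressor` helper appears in the example linked
+below; `ZipDecompressor` and the `ZipMethodWinZip` method ID are assumed to be the package's matching zip
+helpers), it could look like this:
+
+```Go
+import (
+ "archive/zip"
+ "io"
+
+ "github.com/klauspost/compress/zstd"
+)
+
+// A single compressor/decompressor pair can serve many archives concurrently.
+var (
+ zipComp = zstd.ZipCompressor()
+ zipDecomp = zstd.ZipDecompressor()
+)
+
+func newZipWriter(w io.Writer) *zip.Writer {
+ zw := zip.NewWriter(w)
+ // Register on this writer only; avoid the global zip registration.
+ zw.RegisterCompressor(zstd.ZipMethodWinZip, zipComp)
+ // Entries must also be created with Method: zstd.ZipMethodWinZip to use zstd.
+ return zw
+}
+
+func newZipReader(r io.ReaderAt, size int64) (*zip.Reader, error) {
+ zr, err := zip.NewReader(r, size)
+ if err != nil {
+  return nil, err
+ }
+ // Same on the read side: register on this reader only.
+ zr.RegisterDecompressor(zstd.ZipMethodWinZip, zipDecomp)
+ return zr, nil
+}
+```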
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 000000000..25ca98394 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
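+// It reads 4 bytes at a time from the end of the buffer when possible,
+// falling back to single bytes when fewer than 4 bytes remain.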
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..1952f175b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..9c28840c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,731 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil 
+ } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..32a7f401d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. 
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. 
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. 
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
+ for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. + outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<= b.size { + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() + b.litEnc.Reuse = huff0.ReusePolicyNone + return nil + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 000000000..01a01e486 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer 
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 000000000..55a388553 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..0e59a242d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..6a5a2988b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
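// Illustrative usage sketch (editorial example, not part of the vendored file):
// how a caller might use Header.Decode to inspect frame metadata before
// committing to a full decompression. Function and variable names here are
// assumptions made for the example only.
package zstdusage

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func inspectFrame(frameStart []byte) error {
	var h zstd.Header
	// Ideally frameStart holds at least zstd.HeaderMaxSize bytes.
	if err := h.Decode(frameStart); err != nil {
		return err // not a zstd frame start, or too little data supplied
	}
	fmt.Println("single segment:", h.SingleSegment, "window:", h.WindowSize)
	if h.HasFCS {
		fmt.Println("frame content size:", h.FrameContentSize)
	}
	if h.FirstBlock.OK {
		fmt.Println("first block compressed:", h.FirstBlock.Compressed)
	}
	return nil
}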
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 000000000..bbca17234 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... 
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
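// Illustrative usage sketch (editorial example, not part of the vendored file):
// a typical streaming lifecycle matching the documentation above — create the
// decoder once, read from it as an io.Reader, and Close it when done. The
// helper name is an assumption for the example.
package zstdusage

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

func decompressStream(dst io.Writer, src io.Reader) error {
	dec, err := zstd.NewReader(src)
	if err != nil {
		return err
	}
	// Close releases the decoder's goroutines and buffers; per the docs above,
	// the decoder cannot be reused afterwards.
	defer dec.Close()
	_, err = io.Copy(dst, dec)
	return err
}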
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..774c5f00f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
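// Illustrative usage sketch (editorial example, not part of the vendored file):
// combining the options defined in this file to bound memory when decoding
// untrusted input with DecodeAll. The concrete limits are arbitrary example
// values, not recommendations.
package zstdusage

import "github.com/klauspost/compress/zstd"

func decodeUntrusted(compressed []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil, // nil reader: stateless DecodeAll use only
		zstd.WithDecoderConcurrency(1),
		zstd.WithDecoderMaxMemory(64<<20), // at most 64 MiB decoded per call
		zstd.WithDecoderMaxWindow(8<<20),  // reject frames needing >8 MiB windows
		zstd.WithDecoderLowmem(true),
	)
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}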
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..b7b83164b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..5ca46038a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
+func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 000000000..4613724e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
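+
+// The encoder below is the match finder normally selected through the
+// exported WithEncoderLevel(SpeedBestCompression) option. A minimal
+// illustrative sketch of reaching it via the public API (not upstream code;
+// input is a placeholder []byte):
+//
+//	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer enc.Close()
+//	compressed := enc.EncodeAll(input, nil)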
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
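+ // Entries that fall below minOff can no longer be referenced and are
+ // cleared; the remaining offsets are rebased so they stay valid once
+ // e.cur is wound back to e.maxMatchOff below.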
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
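+ // Walking s and offset back in lockstep keeps the distance between
+ // them, and therefore the offset code, unchanged while the match grows.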
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
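+ // Probe the long table at the position just past the current best match;
+ // an earlier occurrence that lines up there may start sooner and cover
+ // more of the input, so feed it back through improve().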
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..84a79fde7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
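+ // The chained previous entry also matches here; keep whichever of the
+ // two long candidates gives the longer match.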
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
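+ // Only adopt the end-anchored candidate if it actually beats the
+ // match length found so far.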
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 000000000..d36be7bd8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
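+			// The offset just used (offset2) becomes the most recent one for following sequences.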
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
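+		// Entries further back than maxMatchOff from the end of history can never match again, so they are cleared.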
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
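+				// t is the position of the candidate match within the history buffer (src).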
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 000000000..f45a3da7d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
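+	// maxMatchLength caps how far matches are extended in the encoders below.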
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 000000000..8f8223cd3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,642 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
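+//
+// A minimal streaming example (dst is any io.Writer, data the input bytes;
+// error handling is omitted for brevity):
+//
+//	enc, _ := NewWriter(dst)
+//	_, _ = enc.Write(data)
+//	_ = enc.Close()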
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
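+		// Advance the input past the bytes that were just buffered.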
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + if final { + s.eofWritten = true + } + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. 
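Because of the ReadFrom implementation above, *Encoder satisfies io.ReaderFrom, so io.Copy can feed it directly. A small sketch; compressStream is an illustrative helper name, not part of the package:

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// compressStream copies src into a zstd stream written to dst.
// io.Copy detects that *zstd.Encoder implements io.ReaderFrom and
// delegates to the ReadFrom loop shown above.
func compressStream(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}

func main() {
	if err := compressStream(os.Stdout, os.Stdin); err != nil {
		panic(err)
	}
}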
+ var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) 
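EncodeAll is the buffer-to-buffer entry point. A short round-trip sketch using the package's Decoder; illustrative example, not vendored code:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the Encoder is only used for EncodeAll.
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()
	dec, _ := zstd.NewReader(nil)
	defer dec.Close()

	src := []byte("some payload worth compressing")
	compressed := enc.EncodeAll(src, nil)

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, plain)) // true
}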
+ blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 000000000..20671dcb9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
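MaxEncodedSize pairs naturally with EncodeAll when reusing a destination buffer; a hedged sketch, again not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()

	src := make([]byte, 1<<20)
	// Reserving the worst case up front means EncodeAll never has to grow dst.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Println("compressed size:", len(dst))
}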
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level and max 8MB. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
+func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. 
+// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
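The options above compose at construction time; for example, with illustrative values:

package main

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithEncoderConcurrency(1), // synchronous, single-goroutine streaming
		zstd.WithWindowSize(1<<20),     // power of two between MinWindowSize and MaxWindowSize
		zstd.WithEncoderCRC(true),
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	_, _ = enc.Write([]byte("options demo"))
}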
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 000000000..e47af66e7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,415 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. 
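The Window_Descriptor decoding just above packs an exponent in the top five bits and a 3-bit mantissa worth 1/8 of the base each. A standalone re-derivation; windowSizeFromDescriptor is an illustrative helper, not part of the package:

package main

import "fmt"

// windowSizeFromDescriptor mirrors the arithmetic in frameDec.reset:
// the top 5 bits select a power-of-two base starting at 2^10, and each
// of the low 3 mantissa bits adds 1/8 of that base.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + (wd >> 3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}

func main() {
	fmt.Println(windowSizeFromDescriptor(0x38)) // 131072: windowLog 17, no mantissa
	fmt.Println(windowSizeFromDescriptor(0x3c)) // 196608: 128 KiB plus 4/8 of the base
}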
+ d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..667ca0679 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 000000000..2f8860a72 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
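The padding arithmetic in calcSkippableFrame above rounds the written size up to the requested multiple while leaving room for the skippable frame header. A small sketch of the same rounding rule; paddingFor is an illustrative helper, not part of the package:

package main

import "fmt"

// paddingFor re-derives the rounding rule of calcSkippableFrame: round the
// written size up to the next multiple of want, but never add less than the
// 8 bytes the skippable frame header itself occupies.
func paddingFor(written, want int64) int64 {
	const header = 4 + 4 // magic + length field
	left := written % want
	if left == 0 {
		return 0
	}
	add := want - left
	for add < header {
		add += want
	}
	return add
}

func main() {
	fmt.Println(paddingFor(1000, 512)) // 24  -> total 1024
	fmt.Println(paddingFor(1022, 512)) // 514 -> 2 bytes would not fit a header
}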
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder til RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..d04a829b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..bcde39869 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..8adfebb02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 000000000..ab26326a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
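The symbol-spreading loop in the generic buildDtable above walks the table with a fixed step. A small sketch showing that, for the table sizes used here (tablelog of at least 5), the step is odd and therefore coprime with the power-of-two size, so every slot is visited exactly once; spreadOrder is an illustrative helper, not part of the package:

package main

import "fmt"

// spreadOrder visits table positions the same way buildDtable does:
// step = size/2 + size/8 + 3, wrapped with a power-of-two mask.
func spreadOrder(tableLog uint8) []uint32 {
	size := uint32(1) << tableLog
	step := (size >> 1) + (size >> 3) + 3 // odd for size >= 16, so coprime with size
	mask := size - 1
	order := make([]uint32, 0, size)
	pos := uint32(0)
	for i := uint32(0); i < size; i++ {
		order = append(order, pos)
		pos = (pos + step) & mask
	}
	return order
}

func main() {
	order := spreadOrder(5)
	seen := make(map[uint32]bool, len(order))
	for _, p := range order {
		seen[p] = true
	}
	fmt.Println(len(order), len(seen)) // 32 32: every slot hit exactly once
}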
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..474cb77d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..5d73c21eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..09164856d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..777290d44 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. 
+ +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..fc40c8200 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. 
+ c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..ddb63aa91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..ae7d4d329 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 000000000..d4221edf4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..0be16cefc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..0782b86e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..57b9c31c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..d7fe6d82d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..c59f17e07 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm,
+// specialized for the case where each sequence needs at most 56 bits.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2
+// extensions, specialized for the case where each sequence needs at most 56 bits.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
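+// Only the seqs slice is filled here; the decoded sequences are applied to the
+// output in a separate execute step (see executeSimple below).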
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
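+		// The append above only grows the backing array; the length is trimmed
+		// back below so that just the extra capacity remains.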
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..f5591fa1e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
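+	// When at least 8 bytes of input remain, the bit buffer is refilled with a
+	// single 8-byte load; otherwise the slower byte-by-byte refill is used.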
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..2fb35b788 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..8014174a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..ec13594e8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..29c15c8c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..066bef2a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,125 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) 
+ } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 000000000..200e596c6 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. 
+func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! 
+ if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 62af14ad2..b797577ff 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,62 @@ +## 1.36.0 + +### Features +- new: make collection-related matchers Go 1.23 iterator aware [4c964c6] + +### Maintenance +- Replace min/max helpers with built-in min/max [ece6872] +- Fix some typos in docs [8e924d7] + +## 1.35.1 + +### Fixes +- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1] + +## 1.35.0 + +### Features + +- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to expclility add a timeout) [e4c4265] +- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931] + +### Fixes + +- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e] + +### Maintenance + +- Bump all dependencies [a05a416] + +## 1.34.2 + +Require Go 1.22+ + +### Maintenance +- bump ginkgo as well [c59c6dc] +- bump to go 1.22 - remove x/exp dependency [8158b99] + +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + +## 1.34.0 + +### Features +- Add RoundTripper method to ghttp.Server [c549e0d] + +### Fixes +- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c] +- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67] + +### Maintenance +- bump ginkgo [8af2ece] +- Fix typo in docs [123a071] +- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083] +- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f] +- Bump github-pages from 230 to 231 in /docs (#748) [892c303] + ## 1.33.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 9697d5134..eb74f6f6a 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.33.1" +const GOMEGA_VERSION = "1.36.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
@@ -319,7 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in
 		Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
 	}, SpecTimeout(time.Second))
 
-Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+
+By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
+
+	Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
+
+now either the context cancellation or the timeout will cause Eventually to stop polling.
+
+If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
+
+	EnforceDefaultTimeoutsWhenUsingContexts()
+
+in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
 
 **Category 3: Making assertions _in_ the function passed into Eventually**
 
@@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
 	Default.SetDefaultConsistentlyPollingInterval(t)
 }
 
+// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided.
+func EnforceDefaultTimeoutsWhenUsingContexts() {
+	Default.EnforceDefaultTimeoutsWhenUsingContexts()
+}
+
+// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`.
+func DisableDefaultTimeoutsWhenUsingContext() {
+	Default.DisableDefaultTimeoutsWhenUsingContext()
+}
+
 // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
 // the matcher passed to the Should and ShouldNot methods.
// diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index cde9e2ec8..8b4cd1f5b 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { if assertion.asyncType == AsyncAssertionTypeConsistently { return time.After(assertion.g.DurationBundle.ConsistentlyDuration) } else { - if assertion.ctx == nil { + if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts { return time.After(assertion.g.DurationBundle.EventuallyTimeout) } else { return nil @@ -496,7 +496,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch for _, err := range []error{actualErr, matcherErr} { if pollingSignalErr, ok := AsPollingSignalError(err); ok { if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") + if pollingSignalErr.IsSuccessful() { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)") + } else { + return true // early escape hatch for Consistently + } + } else { + fail("Told to stop trying") + } return false } if pollingSignalErr.IsTryAgainAfter() { diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 6e0d90d3a..2e026c336 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -8,10 +8,11 @@ import ( ) type DurationBundle struct { - EventuallyTimeout time.Duration - EventuallyPollingInterval time.Duration - ConsistentlyDuration time.Duration - ConsistentlyPollingInterval time.Duration + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration + EnforceDefaultTimeoutsWhenUsingContexts bool } const ( @@ -20,15 +21,19 @@ const ( ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + + EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS" ) func FetchDefaultDurationBundle() DurationBundle { + _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName) return DurationBundle{ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), - ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), - ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts, } } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index de1f4f336..c6e2fcc0e 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -127,3 +127,11 @@ func (g 
*Gomega) SetDefaultConsistentlyDuration(t time.Duration) { func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { g.DurationBundle.ConsistentlyPollingInterval = t } + +func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true +} + +func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false +} diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 83b04b1a4..3a4f7ddd9 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -17,6 +17,7 @@ type PollingSignalError interface { error Wrap(err error) PollingSignalError Attach(description string, obj any) PollingSignalError + Successfully() PollingSignalError Now() } @@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct { wrappedErr error pollingSignalErrorType PollingSignalErrorType duration time.Duration + successful bool Attachments []PollingSignalErrorAttachment } @@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error { return s.wrappedErr } +func (s *PollingSignalErrorImpl) Successfully() PollingSignalError { + s.successful = true + return s +} + func (s *PollingSignalErrorImpl) Now() { panic(s) } @@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying } +func (s *PollingSignalErrorImpl) IsSuccessful() bool { + return s.successful +} + func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter } diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index 527c1a1c1..bd7f0b96e 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -4,17 +4,31 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type BeEmptyMatcher struct { } func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + // short-circuit the iterator case, as we only need to see the first + // element, if any. + if miter.IsIter(actual) { + var length int + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { length++; return false }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { length++; return false }) + } + return length == 0, nil + } + length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice/iterator. 
Got:\n%s", format.Object(actual, 1)) } return length == 0, nil diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index f69037a4f..a11188182 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -17,8 +18,8 @@ type ConsistOfMatcher struct { } func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) @@ -60,10 +61,21 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { } func flatten(elems []interface{}) []interface{} { - if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + if len(elems) != 1 || + !(isArrayOrSlice(elems[0]) || + (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) { return elems } + if miter.IsIter(elems[0]) { + flattened := []any{} + miter.IterateV(elems[0], func(v reflect.Value) bool { + flattened = append(flattened, v.Interface()) + return true + }) + return flattened + } + value := reflect.ValueOf(elems[0]) flattened := make([]interface{}, value.Len()) for i := 0; i < value.Len(); i++ { @@ -116,7 +128,19 @@ func presentable(elems []interface{}) interface{} { func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} - if isMap(actual) { + if miter.IsIter(actual) { + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } + } else if isMap(actual) { keys := value.MapKeys() for i := 0; i < value.Len(); i++ { values = append(values, value.MapIndex(keys[i]).Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 3d45c9ebc..830239c7b 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -8,6 +8,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type ContainElementMatcher struct { @@ -16,16 +17,18 @@ type ContainElementMatcher struct { } func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. 
Got:\n%s", format.Object(actual, 1)) } var actualT reflect.Type var result reflect.Value - switch l := len(matcher.Result); { - case l > 1: + switch numResultArgs := len(matcher.Result); { + case numResultArgs > 1: return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at") - case l == 1: + case numResultArgs == 1: + // Check the optional result arg to point to a single value/array/slice/map + // of a type compatible with the actual value. if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr { return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s", format.Object(matcher.Result[0], 1)) @@ -34,93 +37,209 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e resultReference := matcher.Result[0] result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings switch result.Kind() { - case reflect.Array: + case reflect.Array: // result arrays are not supported, as they cannot be dynamically sized. + if miter.IsIter(actual) { + _, actualvT := miter.IterKVTypes(actual) + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.SliceOf(actualvT), result.Type().String()) + } return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.SliceOf(actualT.Elem()).String(), result.Type().String()) - case reflect.Slice: - if !isArrayOrSlice(actual) { + + case reflect.Slice: // result slice + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is an array or slice + // - ✔ actual is an iter.Seq producing "v" elements + // - ✔ actual is an iter.Seq2 producing "v" elements, ignoring + // the "k" elements. + switch { + case isArrayOrSlice(actual): + if !actualT.Elem().AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String()) } - if !actualT.Elem().AssignableTo(result.Type().Elem()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - case reflect.Map: - if !isMap(actual) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - if !actualT.AssignableTo(result.Type()) { + + case reflect.Map: // result map + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is a map + // - ✔ actual is an iter.Seq2 (iter.Seq doesn't fit though) + switch { + case isMap(actual): + if !actualT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + actualkT, actualvT := miter.IterKVTypes(actual) + if actualkT == nil { + return false, fmt.Errorf("ContainElement cannot return findings. 
Need *%s, got *%s", + reflect.SliceOf(actualvT).String(), result.Type().String()) + } + if !reflect.MapOf(actualkT, actualvT).AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.MapOf(actualkT, actualvT), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", actualT.String(), result.Type().String()) } + default: - if !actualT.Elem().AssignableTo(result.Type()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.Elem().String(), result.Type().String()) + // can we assign a (single) element in actual to what the result arg + // points to? + switch { + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + default: + if !actualT.Elem().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.Elem().String(), result.Type().String()) + } } } } + // If the supplied matcher isn't an Omega matcher, default to the Equal + // matcher. elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) if !elementIsMatcher { elemMatcher = &EqualMatcher{Expected: matcher.Element} } value := reflect.ValueOf(actual) - var valueAt func(int) interface{} - var getFindings func() reflect.Value - var foundAt func(int) + var getFindings func() reflect.Value // abstracts how the findings are collected and stored + var lastError error - if isMap(actual) { - keys := value.MapKeys() - valueAt = func(i int) interface{} { - return value.MapIndex(keys[i]).Interface() + if !miter.IsIter(actual) { + var valueAt func(int) interface{} + var foundAt func(int) + // We're dealing with an array/slice/map, so in all cases we can iterate + // over the elements in actual using indices (that can be considered + // keys in case of maps). 
+ if isMap(actual) { + keys := value.MapKeys() + valueAt = func(i int) interface{} { + return value.MapIndex(keys[i]).Interface() + } + if result.Kind() != reflect.Invalid { + fm := reflect.MakeMap(actualT) + getFindings = func() reflect.Value { return fm } + foundAt = func(i int) { + fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + } + } + } else { + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + if result.Kind() != reflect.Invalid { + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + foundAt = func(i int) { + fsl = reflect.Append(fsl, value.Index(i)) + } + } } - if result.Kind() != reflect.Invalid { - fm := reflect.MakeMap(actualT) - getFindings = func() reflect.Value { - return fm + + for i := 0; i < value.Len(); i++ { + elem := valueAt(i) + success, err := elemMatcher.Match(elem) + if err != nil { + lastError = err + continue } - foundAt = func(i int) { - fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + if success { + if result.Kind() == reflect.Invalid { + return true, nil + } + foundAt(i) } } } else { - valueAt = func(i int) interface{} { - return value.Index(i).Interface() - } + // We're dealing with an iterator as a first-class construct, so things + // are slightly different: there is no index defined as in case of + // arrays/slices/maps, just "ooooorder" + var found func(k, v reflect.Value) if result.Kind() != reflect.Invalid { - var f reflect.Value - if result.Kind() == reflect.Slice { - f = reflect.MakeSlice(result.Type(), 0, 0) + if result.Kind() == reflect.Map { + fm := reflect.MakeMap(result.Type()) + getFindings = func() reflect.Value { return fm } + found = func(k, v reflect.Value) { fm.SetMapIndex(k, v) } } else { - f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) - } - getFindings = func() reflect.Value { - return f - } - foundAt = func(i int) { - f = reflect.Append(f, value.Index(i)) + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + found = func(_, v reflect.Value) { fsl = reflect.Append(fsl, v) } } } - } - var lastError error - for i := 0; i < value.Len(); i++ { - elem := valueAt(i) - success, err := elemMatcher.Match(elem) - if err != nil { - lastError = err - continue + success := false + actualkT, _ := miter.IterKVTypes(actual) + if actualkT == nil { + miter.IterateV(actual, func(v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(reflect.Value{}, v) + } + return true // iterate on... + }) + } else { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(k, v) + } + return true // iterate on... 
+ }) } - if success { - if result.Kind() == reflect.Invalid { - return true, nil - } - foundAt(i) + if success && result.Kind() == reflect.Invalid { + return true, nil } } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 946cd8bea..d9fcb8b80 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -13,8 +14,8 @@ type ContainElementsMatcher struct { } func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go index 025b6e1ac..4111f2b86 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveEachMatcher struct { @@ -12,8 +13,8 @@ type HaveEachMatcher struct { } func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s", + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } @@ -22,6 +23,38 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err elemMatcher = &EqualMatcher{Expected: matcher.Element} } + if miter.IsIter(actual) { + // rejecting the non-elements case works different for iterators as we + // don't want to fetch all elements into a slice first. + count := 0 + var success bool + var err error + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } + if count == 0 { + return false, fmt.Errorf("HaveEach matcher expects a non-empty iter.Seq/iter.Seq2. Got:\n%s", + format.Object(actual, 1)) + } + return success, err + } + value := reflect.ValueOf(actual) if value.Len() == 0 { return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s", @@ -40,7 +73,8 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err } } - // if there are no elements, then HaveEach will match. + // if we never failed then we succeed; the empty/nil cases have already been + // rejected above. 
for i := 0; i < value.Len(); i++ { success, err := elemMatcher.Match(valueAt(i)) if err != nil { diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index dca5b9446..23799f1c6 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -2,8 +2,10 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type mismatchFailure struct { @@ -21,24 +23,68 @@ type HaveExactElementsMatcher struct { func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { matcher.resetState() - if isMap(actual) { - return false, fmt.Errorf("error") + if isMap(actual) || miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveExactElements matcher doesn't work on map or iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) - values := valuesOf(actual) - lenMatchers := len(matchers) + + success = true + + if miter.IsIter(actual) { + // In the worst case, we need to see everything before we can give our + // verdict. The only exception is fast fail. + i := 0 + miter.IterateV(actual, func(v reflect.Value) bool { + if i >= lenMatchers { + // the iterator produces more values than we got matchers: this + // is not good. + matcher.extraIndex = i + success = false + return false + } + + elemMatcher := matchers[i].(omegaMatcher) + match, err := elemMatcher.Match(v.Interface()) + if err != nil { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: err.Error(), + }) + success = false + } else if !match { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: elemMatcher.FailureMessage(v.Interface()), + }) + success = false + } + i++ + return true + }) + if i < len(matchers) { + // the iterator produced less values than we got matchers: this is + // no good, no no no. 
+ matcher.missingIndex = i + success = false + } + return success, nil + } + + values := valuesOf(actual) lenValues := len(values) for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i + success = false continue } if i >= lenValues { matcher.missingIndex = i + success = false return } @@ -49,15 +95,17 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool index: i, failure: err.Error(), }) + success = false } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), }) + success = false } } - return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil + return success, nil } func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 6989f78c4..8dd3f871a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (interface{}, error) { +func extractField(actual interface{}, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -64,36 +64,46 @@ func extractField(actual interface{}, field string, matchername string) (interfa type HaveFieldMatcher struct { Field string Expected interface{} +} - extractedField interface{} - expectedMatcher omegaMatcher +func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { + var isMatcher bool + expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher) + if !isMatcher { + expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + return expectedMatcher } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") + extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } - var isMatcher bool - matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) - if !isMatcher { - matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} - } - - return matcher.expectedMatcher.Match(matcher.extractedField) + return matcher.expectedMatcher().Match(extractedField) } func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) - message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().FailureMessage(extractedField) return message } func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field) - message += 
matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().NegatedFailureMessage(extractedField) return message } diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index 00cffec70..b62ee93cb 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyMatcher struct { @@ -14,8 +15,8 @@ type HaveKeyMatcher struct { } func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -23,6 +24,20 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro keyMatcher = &EqualMatcher{Expected: matcher.Key} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 4c5916804..3d608f63e 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyWithValueMatcher struct { @@ -15,8 +16,8 @@ type HaveKeyWithValueMatcher struct { } func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. 
Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -29,6 +30,27 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, valueMatcher = &EqualMatcher{Expected: matcher.Value} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + if success { + success, err = valueMatcher.Match(v.Interface()) + if err != nil { + err = fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go index ee4276189..ca25713fe 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -13,7 +13,7 @@ type HaveLenMatcher struct { func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1)) } return length == matcher.Count, nil diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go new file mode 100644 index 000000000..d8837a4d0 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go @@ -0,0 +1,128 @@ +//go:build go1.23 + +package miter + +import ( + "reflect" +) + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return true } + +// IsIter returns true if the specified value is a function type that can be +// range-d over, otherwise false. +// +// We don't use reflect's CanSeq and CanSeq2 directly, as these would return +// true also for other value types that are range-able, such as integers, +// slices, et cetera. Here, we aim only at range-able (iterator) functions. +func IsIter(it any) bool { + if it == nil { // on purpose we only test for untyped nil. + return false + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return false + } + return t.CanSeq() || t.CanSeq2() +} + +// IterKVTypes returns the reflection types of an iterator's yield function's K +// and optional V arguments, otherwise nil K and V reflection types. +func IterKVTypes(it any) (k, v reflect.Type) { + if it == nil { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return + } + // get the reflection types for V, and where applicable, K. + switch { + case t.CanSeq(): + v = t. /*iterator fn*/ In(0). /*yield fn*/ In(0) + case t.CanSeq2(): + yieldfn := t. 
/*iterator fn*/ In(0) + k = yieldfn.In(0) + v = yieldfn.In(1) + } + return +} + +// IsSeq2 returns true if the passed iterator function is compatible with +// iter.Seq2, otherwise false. +// +// IsSeq2 hides the Go 1.23+ specific reflect.Type.CanSeq2 behind a facade which +// is empty for Go versions before 1.23. +func IsSeq2(it any) bool { + if it == nil { + return false + } + t := reflect.TypeOf(it) + return t.Kind() == reflect.Func && t.CanSeq2() +} + +// isNilly returns true if v is either an untyped nil, or is a nil function (not +// necessarily an iterator function). +func isNilly(v any) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() == reflect.Func && rv.IsNil() +} + +// IterateV loops over the elements produced by an iterator function, passing +// the elements to the specified yield function individually and stopping only +// when either the iterator function runs out of elements or the yield function +// tell us to stop it. +// +// IterateV works very much like reflect.Value.Seq but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateV(it any, yield func(v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} + +// IterateKV loops over the key-value elements produced by an iterator function, +// passing the elements to the specified yield function individually and +// stopping only when either the iterator function runs out of elements or the +// yield function tell us to stop it. +// +// IterateKV works very much like reflect.Value.Seq2 but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq2() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0], args[1]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} diff --git a/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go new file mode 100644 index 000000000..4b8fcc55b --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go @@ -0,0 +1,44 @@ +//go:build !go1.23 + +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
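A minimal sketch of how a matcher can walk any single-value iterator through the miter facade without itself using a range-over-func statement (illustrative only; the countValues helper is hypothetical and not part of the vendored code):

	// countValues tallies the values produced by any iter.Seq-compatible
	// function handed to it as a plain `any`.
	countValues := func(it any) int {
		n := 0
		miter.IterateV(it, func(v reflect.Value) bool {
			n++
			return true // keep iterating until the iterator is exhausted
		})
		return n
	}

Under Go versions before 1.23, the no-op fallbacks in this file make the same call return zero instead of ranging.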
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ + +package miter + +import "reflect" + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return false } + +// IsIter always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsIter(i any) bool { return false } + +// IsSeq2 always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsSeq2(it any) bool { return false } + +// IterKVTypes always returns nil reflection types for Go versions before 1.23 +// as there is no iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IterKVTypes(i any) (k, v reflect.Type) { + return +} + +// IterateV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateV(it any, yield func(v reflect.Value) bool) {} + +// IterateKV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) {} diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 1c54edd8f..44aa61d4b 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,6 +1,8 @@ package bipartitegraph import ( + "slices" + . "github.com/onsi/gomega/matchers/support/goraph/edge" . 
"github.com/onsi/gomega/matchers/support/goraph/node" "github.com/onsi/gomega/matchers/support/goraph/util" @@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} } + if done { // if last layer - into last layer must be only 'free' nodes + currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool { + return !matching.Free(in) + }) + } guideLayers = append(guideLayers, currentLayer) } diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go index dced2419e..b9440ac7a 100644 --- a/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -15,6 +15,8 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/onsi/gomega/matchers/internal/miter" ) type omegaMatcher interface { @@ -152,6 +154,17 @@ func lengthOf(a interface{}) (int, bool) { switch reflect.TypeOf(a).Kind() { case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: return reflect.ValueOf(a).Len(), true + case reflect.Func: + if !miter.IsIter(a) { + return 0, false + } + var l int + if miter.IsSeq2(a) { + miter.IterateKV(a, func(k, v reflect.Value) bool { l++; return true }) + } else { + miter.IterateV(a, func(v reflect.Value) bool { l++; return true }) + } + return l, true default: return 0, false } diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 7c7adb941..30f2beed3 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -29,6 +29,8 @@ type Gomega interface { SetDefaultEventuallyPollingInterval(time.Duration) SetDefaultConsistentlyDuration(time.Duration) SetDefaultConsistentlyPollingInterval(time.Duration) + EnforceDefaultTimeoutsWhenUsingContexts() + DisableDefaultTimeoutsWhenUsingContext() } // All Gomega matchers must implement the GomegaMatcher interface diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 0ce2d90fd..8abb7a567 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -12,7 +12,7 @@ | AdditionalRoutingCapabilities| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled | +| CMPSMachineNamePrefix| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | | ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | @@ -51,7 +51,6 @@ | UserNamespacesPodSecurityStandards| | | Enabled | Enabled | Enabled | Enabled | | UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiVCenters| | | Enabled | Enabled | Enabled | Enabled | | VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | @@ -80,5 +79,6 @@ | SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | 
Enabled | | VSphereDriverConfiguration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiVCenters| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/register.go b/vendor/github.com/openshift/hypershift/api/hypershift/register.go deleted file mode 100644 index 4a0a91d4b..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/register.go +++ /dev/null @@ -1,3 +0,0 @@ -package hypershift - -const GroupName = "hypershift.openshift.io" diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/clusterconfig.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/clusterconfig.go deleted file mode 100644 index e8a913966..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/clusterconfig.go +++ /dev/null @@ -1,15 +0,0 @@ -package v1alpha1 - -import configv1 "github.com/openshift/api/config/v1" - -func (c *ClusterConfiguration) GetAPIServer() *configv1.APIServerSpec { return c.APIServer } -func (c *ClusterConfiguration) GetAuthentication() *configv1.AuthenticationSpec { - return c.Authentication -} -func (c *ClusterConfiguration) GetFeatureGate() *configv1.FeatureGateSpec { return c.FeatureGate } -func (c *ClusterConfiguration) GetImage() *configv1.ImageSpec { return c.Image } -func (c *ClusterConfiguration) GetIngress() *configv1.IngressSpec { return c.Ingress } -func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec { return c.Network } -func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec { return c.OAuth } -func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec { return c.Scheduler } -func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec { return c.Proxy } diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/conditions.go deleted file mode 100644 index b13b49834..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/conditions.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1alpha1 - -// HostedCluster conditions. -const ( - // HostedClusterAvailable indicates whether the HostedCluster has a healthy - // control plane. - HostedClusterAvailable ConditionType = "Available" - // HostedClusterProgressing indicates whether the HostedCluster is attempting - // an initial deployment or upgrade. - HostedClusterProgressing ConditionType = "Progressing" - // HostedClusterDegraded indicates whether the HostedCluster is encountering - // an error that may require user intervention to resolve. - HostedClusterDegraded ConditionType = "Degraded" - - // Bubble up from HCP. - - // InfrastructureReady bubbles up the same condition from HCP. - InfrastructureReady ConditionType = "InfrastructureReady" - // KubeAPIServerAvailable bubbles up the same condition from HCP. - KubeAPIServerAvailable ConditionType = "KubeAPIServerAvailable" - // EtcdAvailable bubbles up the same condition from HCP. - EtcdAvailable ConditionType = "EtcdAvailable" - // ValidHostedControlPlaneConfiguration bubbles up the same condition from HCP. - ValidHostedControlPlaneConfiguration ConditionType = "ValidHostedControlPlaneConfiguration" - - // Bubble up from HCP which bubbles up from CVO. 
- - // ClusterVersionSucceeding indicates the current status of the desired release - // version of the HostedCluster as indicated by the Failing condition in the - // underlying cluster's ClusterVersion. - ClusterVersionSucceeding ConditionType = "ClusterVersionSucceeding" - // ClusterVersionUpgradeable indicates the Upgradeable condition in the - // underlying cluster's ClusterVersion. - ClusterVersionUpgradeable ConditionType = "ClusterVersionUpgradeable" - // ClusterVersionFailing bubbles up Failing from the CVO. - ClusterVersionFailing ConditionType = "ClusterVersionFailing" - // ClusterVersionProgressing bubbles up configv1.OperatorProgressing from the CVO. - ClusterVersionProgressing ConditionType = "ClusterVersionProgressing" - // ClusterVersionAvailable bubbles up Failing configv1.OperatorAvailable from the CVO. - ClusterVersionAvailable ConditionType = "ClusterVersionAvailable" - // ClusterVersionReleaseAccepted bubbles up Failing ReleaseAccepted from the CVO. - ClusterVersionReleaseAccepted ConditionType = "ClusterVersionReleaseAccepted" - - // UnmanagedEtcdAvailable indicates whether a user-managed etcd cluster is - // healthy. - UnmanagedEtcdAvailable ConditionType = "UnmanagedEtcdAvailable" - - // IgnitionEndpointAvailable indicates whether the ignition server for the - // HostedCluster is available to handle ignition requests. - IgnitionEndpointAvailable ConditionType = "IgnitionEndpointAvailable" - - // ValidHostedClusterConfiguration indicates (if status is true) that the - // ClusterConfiguration specified for the HostedCluster is valid. - ValidHostedClusterConfiguration ConditionType = "ValidConfiguration" - - // SupportedHostedCluster indicates whether a HostedCluster is supported by - // the current configuration of the hypershift-operator. - // e.g. If HostedCluster requests endpointAcess Private but the hypershift-operator - // is running on a management cluster outside AWS or is not configured with AWS - // credentials, the HostedCluster is not supported. - SupportedHostedCluster ConditionType = "SupportedHostedCluster" - - // ValidOIDCConfiguration indicates if an AWS cluster's OIDC condition is - // detected as invalid. - ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration" - - // ValidReleaseImage indicates if the release image set in the spec is valid - // for the HostedCluster. For example, this can be set false if the - // HostedCluster itself attempts an unsupported version before 4.9 or an - // unsupported upgrade e.g y-stream upgrade before 4.11. - ValidReleaseImage ConditionType = "ValidReleaseImage" - - // PlatformCredentialsFound indicates that credentials required for the - // desired platform are valid. - PlatformCredentialsFound ConditionType = "PlatformCredentialsFound" - - // ReconciliationActive indicates if reconciliation of the HostedCluster is - // active or paused. - ReconciliationActive ConditionType = "ReconciliationActive" - // ReconciliationSucceeded indicates if the HostedCluster reconciliation - // succeeded. - ReconciliationSucceeded ConditionType = "ReconciliationSucceeded" -) - -// Reasons. 
-const ( - StatusUnknownReason = "StatusUnknown" - AsExpectedReason = "AsExpected" - NotFoundReason = "NotFound" - WaitingForAvailableReason = "waitingForAvailable" - SecretNotFoundReason = "SecretNotFound" - - InfraStatusFailureReason = "InfraStatusFailure" - WaitingOnInfrastructureReadyReason = "WaitingOnInfrastructureReady" - - EtcdQuorumAvailableReason = "QuorumAvailable" - EtcdWaitingForQuorumReason = "EtcdWaitingForQuorum" - EtcdStatefulSetNotFoundReason = "StatefulSetNotFound" - - UnmanagedEtcdMisconfiguredReason = "UnmanagedEtcdMisconfigured" - UnmanagedEtcdAsExpected = "UnmanagedEtcdAsExpected" - - FromClusterVersionReason = "FromClusterVersion" - - InvalidConfigurationReason = "InvalidConfiguration" - KubeconfigWaitingForCreateReason = "KubeconfigWaitingForCreate" - UnsupportedHostedClusterReason = "UnsupportedHostedCluster" - InsufficientClusterCapabilitiesReason = "InsufficientClusterCapabilities" - OIDCConfigurationInvalidReason = "OIDCConfigurationInvalid" - PlatformCredentialsNotFoundReason = "PlatformCredentialsNotFound" - InvalidImageReason = "InvalidImage" -) - -// Messages. -const ( - // AllIsWellMessage is standard message. - AllIsWellMessage = "All is well" -) diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/doc.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/doc.go deleted file mode 100644 index 6f154af40..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package v1alpha1 contains the HyperShift API. - -The HyperShift API enables creating and managing lightweight, flexible, heterogeneous -OpenShift clusters at scale. - -HyperShift clusters are deployed in a topology which isolates the "control plane" -(e.g. etcd, the API server, controller manager, etc.) from the "data plane" (e.g. -worker nodes and their kubelets, and the infrastructure on which they run). This -enables "hosted control plane as a service" use cases. -*/ -// +kubebuilder:object:generate=true -// +groupName=hypershift.openshift.io -package v1alpha1 diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/endpointservice_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/endpointservice_types.go deleted file mode 100644 index 8b02c543b..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/endpointservice_types.go +++ /dev/null @@ -1,102 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - SchemeBuilder.Register(func(s *runtime.Scheme) error { - s.AddKnownTypes(SchemeGroupVersion, - &AWSEndpointService{}, - &AWSEndpointServiceList{}, - ) - metav1.AddToGroupVersion(s, SchemeGroupVersion) - return nil - }) -} - -// The following are reasons for the IgnitionEndpointAvailable condition. 
-const ( - // AWSEndpointServiceAvailable indicates whether the AWS Endpoint Service - // has been created for the specified NLB in the management VPC - AWSEndpointServiceAvailable ConditionType = "EndpointServiceAvailable" - - // AWSEndpointServiceAvailable indicates whether the AWS Endpoint has been - // created in the guest VPC - AWSEndpointAvailable ConditionType = "EndpointAvailable" - - AWSSuccessReason string = "AWSSuccess" - AWSErrorReason string = "AWSError" -) - -// AWSEndpointServiceSpec defines the desired state of AWSEndpointService -type AWSEndpointServiceSpec struct { - // The name of the NLB for which an Endpoint Service should be configured - NetworkLoadBalancerName string `json:"networkLoadBalancerName"` - - // SubnetIDs is the list of subnet IDs to which guest nodes can attach - // +optional - SubnetIDs []string `json:"subnetIDs,omitempty"` - - // Tags to apply to the EndpointService - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` -} - -// AWSEndpointServiceStatus defines the observed state of AWSEndpointService -type AWSEndpointServiceStatus struct { - // EndpointServiceName is the name of the Endpoint Service created in the - // management VPC - // +optional - EndpointServiceName string `json:"endpointServiceName,omitempty"` - - // EndpointID is the ID of the Endpoint created in the guest VPC - // +optional - EndpointID string `json:"endpointID,omitempty"` - - // Deprecated: Use DNSNames instead - // +optional - DNSName string `json:"dnsName,omitempty"` - - // DNSName are the names for the records created in the hypershift private zone - // +optional - DNSNames []string `json:"dnsNames,omitempty"` - - // DNSZoneID is ID for the hypershift private zone - // +optional - DNSZoneID string `json:"dnsZoneID,omitempty"` - - // Conditions contains details for the current state of the Endpoint Service - // request If there is an error processing the request e.g. the NLB doesn't - // exist, then the Available condition will be false, reason AWSErrorReason, - // and the error reported in the message. 
- // - // Current condition types are: "Available" - // +optional - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=awsendpointservices,scope=Namespaced -// +kubebuilder:subresource:status -// AWSEndpointService specifies a request for an Endpoint Service in AWS -type AWSEndpointService struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AWSEndpointServiceSpec `json:"spec,omitempty"` - Status AWSEndpointServiceStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true -// AWSEndpointServiceList contains a list of AWSEndpointService -type AWSEndpointServiceList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []AWSEndpointService `json:"items"` -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/groupversion_info.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/groupversion_info.go deleted file mode 100644 index b63c943ea..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package v1alpha1 contains API Schema definitions for the hypershift.openshift.io v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=hypershift.openshift.io -package v1alpha1 - -import ( - "github.com/openshift/hypershift/api/hypershift" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: hypershift.GroupName, Version: "v1alpha1"} - SchemeGroupVersion = GroupVersion - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind. -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource. 
-func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hosted_controlplane.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hosted_controlplane.go deleted file mode 100644 index bf4bd8275..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hosted_controlplane.go +++ /dev/null @@ -1,362 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - configv1 "github.com/openshift/api/config/v1" -) - -func init() { - SchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HostedControlPlane{}, - &HostedControlPlaneList{}, - ) - return nil - }) -} - -// HostedControlPlane defines the desired state of HostedControlPlane -// +kubebuilder:resource:path=hostedcontrolplanes,shortName=hcp;hcps,scope=Namespaced,categories=cluster-api -// +kubebuilder:subresource:status -// +kubebuilder:object:root=true -type HostedControlPlane struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec HostedControlPlaneSpec `json:"spec,omitempty"` - Status HostedControlPlaneStatus `json:"status,omitempty"` -} - -// HostedControlPlaneSpec defines the desired state of HostedControlPlane -type HostedControlPlaneSpec struct { - // ReleaseImage is the release image applied to the hosted control plane. - ReleaseImage string `json:"releaseImage"` - - // ControlPlaneReleaseImage specifies the desired OCP release payload for - // control plane components running on the management cluster. - // If not defined, ReleaseImage is used - ControlPlaneReleaseImage *string `json:"controlPlaneReleaseImage,omitempty"` - - // updateService may be used to specify the preferred upstream update service. - // By default it will use the appropriate update service for the cluster and region. - // - // +optional - UpdateService configv1.URL `json:"updateService,omitempty"` - - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be - // contain stable updates that are appropriate for production clusters. - // - // +optional - Channel string `json:"channel,omitempty"` - - PullSecret corev1.LocalObjectReference `json:"pullSecret"` - - // IssuerURL is an OIDC issuer URL which is used as the issuer in all - // ServiceAccount tokens generated by the control plane API server. The - // default value is kubernetes.default.svc, which only works for in-cluster - // validation. - IssuerURL string `json:"issuerURL"` - - // Networking specifies network configuration for the cluster. - // Temporarily optional for backward compatibility, required in future releases. - // +optional - Networking ClusterNetworking `json:"networking,omitempty"` - - // deprecated - // use networking.ServiceNetwork - // +optional - ServiceCIDR string `json:"serviceCIDR,omitempty"` - - // deprecated - // use networking.ClusterNetwork - // +optional - PodCIDR string `json:"podCIDR,omitempty"` - - // deprecated - // use networking.MachineNetwork - // +optional - MachineCIDR string `json:"machineCIDR,omitempty"` - - // deprecated - // use networking.NetworkType - // NetworkType specifies the SDN provider used for cluster networking. 
- // +optional - NetworkType NetworkType `json:"networkType,omitempty"` - - SSHKey corev1.LocalObjectReference `json:"sshKey"` - - // ClusterID is the unique id that identifies the cluster externally. - // Making it optional here allows us to keep compatibility with previous - // versions of the control-plane-operator that have no knowledge of this - // field. - // +optional - ClusterID string `json:"clusterID,omitempty"` - - InfraID string `json:"infraID"` - Platform PlatformSpec `json:"platform"` - DNS DNSSpec `json:"dns"` - - // ServiceAccountSigningKey is a reference to a secret containing the private key - // used by the service account token issuer. The secret is expected to contain - // a single key named "key". If not specified, a service account signing key will - // be generated automatically for the cluster. - // - // +optional - ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"` - - // deprecated - // use networking.apiServer.APIPort - // APIPort is the port at which the APIServer listens inside a worker - // +optional - APIPort *int32 `json:"apiPort,omitempty"` - - // deprecated - // use networking.apiServer.AdvertiseAddress - // APIAdvertiseAddress is the address at which the APIServer listens - // inside a worker. - // +optional - APIAdvertiseAddress *string `json:"apiAdvertiseAddress,omitempty"` - - // deprecated - // use networking.apiServer.APIAllowedCIDRBlocks - // APIAllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer - // If not specified, traffic is allowed from all addresses. - // This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges - // +optional - APIAllowedCIDRBlocks []CIDRBlock `json:"apiAllowedCIDRBlocks,omitempty"` - - // ControllerAvailabilityPolicy specifies the availability policy applied to - // critical control plane components. The default value is SingleReplica. - // - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable" - // +kubebuilder:default:="SingleReplica" - ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - - // InfrastructureAvailabilityPolicy specifies the availability policy applied - // to infrastructure services which run on cluster nodes. The default value is - // SingleReplica. - // - // +optional - // +kubebuilder:default:="SingleReplica" - InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - - // FIPS specifies if the nodes for the cluster will be running in FIPS mode - // +optional - FIPS bool `json:"fips"` - - // KubeConfig specifies the name and key for the kubeconfig secret - // +optional - KubeConfig *KubeconfigSecretRef `json:"kubeconfig,omitempty"` - - // Services defines metadata about how control plane services are published - // in the management cluster. - Services []ServicePublishingStrategyMapping `json:"services"` - - // AuditWebhook contains metadata for configuring an audit webhook - // endpoint for a cluster to process cluster audit events. It references - // a secret that contains the webhook information for the audit webhook endpoint. - // It is a secret because if the endpoint has MTLS the kubeconfig will contain client - // keys. This is currently only supported in IBM Cloud. The kubeconfig needs to be stored - // in the secret with a secret key name that corresponds to the constant AuditWebhookKubeconfigKey. 
- // +optional - AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"` - - // Etcd contains metadata about the etcd cluster the hypershift managed Openshift control plane components - // use to store data. - Etcd EtcdSpec `json:"etcd"` - - // Configuration embeds resources that correspond to the openshift configuration API: - // https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html - // +kubebuilder:validation:Optional - Configuration *ClusterConfiguration `json:"configuration,omitempty"` - - // ImageContentSources lists sources/repositories for the release-image content. - // +optional - ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"` - - // AdditionalTrustBundle references a ConfigMap containing a PEM-encoded X.509 certificate bundle - // +optional - AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"` - - // SecretEncryption contains metadata about the kubernetes secret encryption strategy being used for the - // cluster when applicable. - // +optional - SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"` - - // PausedUntil is a field that can be used to pause reconciliation on a resource. - // Either a date can be provided in RFC3339 format or a boolean. If a date is - // provided: reconciliation is paused on the resource until that date. If the boolean true is - // provided: reconciliation is paused on the resource until the field is removed. - // +optional - PausedUntil *string `json:"pausedUntil,omitempty"` - - // OLMCatalogPlacement specifies the placement of OLM catalog components. By default, - // this is set to management and OLM catalog components are deployed onto the management - // cluster. If set to guest, the OLM catalog components will be deployed onto the guest - // cluster. - // - // +kubebuilder:default=management - // +optional - // +immutable - OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` - - // Autoscaling specifies auto-scaling behavior that applies to all NodePools - // associated with the control plane. - // - // +optional - Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"` - - // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled. - // - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // Tolerations when specified, define what custome tolerations are added to the hcp pods. - // - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` -} - -// AvailabilityPolicy specifies a high level availability policy for components. -type AvailabilityPolicy string - -const ( - // HighlyAvailable means components should be resilient to problems across - // fault boundaries as defined by the component to which the policy is - // attached. This usually means running critical workloads with 3 replicas and - // with little or no toleration of disruption of the component. - HighlyAvailable AvailabilityPolicy = "HighlyAvailable" - - // SingleReplica means components are not expected to be resilient to problems - // across most fault boundaries associated with high availability. This - // usually means running critical workloads with just 1 replica and with - // toleration of full disruption of the component. 
- SingleReplica AvailabilityPolicy = "SingleReplica" -) - -type KubeconfigSecretRef struct { - Name string `json:"name"` - Key string `json:"key"` -} - -type ConditionType string - -const ( - HostedControlPlaneAvailable ConditionType = "Available" - HostedControlPlaneDegraded ConditionType = "Degraded" - EtcdSnapshotRestored ConditionType = "EtcdSnapshotRestored" - CVOScaledDown ConditionType = "CVOScaledDown" - CloudResourcesDestroyed ConditionType = "CloudResourcesDestroyed" -) - -// HostedControlPlaneStatus defines the observed state of HostedControlPlane -type HostedControlPlaneStatus struct { - // Ready denotes that the HostedControlPlane API Server is ready to - // receive requests - // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L226-L230 - // +kubebuilder:validation:Required - // +kubebuilder:default=false - Ready bool `json:"ready"` - - // Initialized denotes whether or not the control plane has - // provided a kubeadm-config. - // Once this condition is marked true, its value is never changed. See the Ready condition for an indication of - // the current readiness of the cluster's control plane. - // This satisfies CAPI contract https://github.com/kubernetes-sigs/cluster-api/blob/cd3a694deac89d5ebeb888307deaa61487207aa0/controllers/cluster_controller_phases.go#L238-L252 - // +kubebuilder:validation:Required - // +kubebuilder:default=false - Initialized bool `json:"initialized"` - - // ExternalManagedControlPlane indicates to cluster-api that the control plane - // is managed by an external service. - // https://github.com/kubernetes-sigs/cluster-api/blob/65e5385bffd71bf4aad3cf34a537f11b217c7fab/controllers/machine_controller.go#L468 - // +kubebuilder:default=true - ExternalManagedControlPlane *bool `json:"externalManagedControlPlane,omitempty"` - - // ControlPlaneEndpoint contains the endpoint information by which - // external clients can access the control plane. This is populated - // after the infrastructure is ready. - // +kubebuilder:validation:Optional - ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"` - - // OAuthCallbackURLTemplate contains a template for the URL to use as a callback - // for identity providers. The [identity-provider-name] placeholder must be replaced - // with the name of an identity provider defined on the HostedCluster. - // This is populated after the infrastructure is ready. - // +kubebuilder:validation:Optional - OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"` - - // versionStatus is the status of the release version applied by the - // hosted control plane operator. - // +optional - VersionStatus *ClusterVersionStatus `json:"versionStatus,omitempty"` - - // Version is the semantic version of the release applied by - // the hosted control plane operator - // - // Deprecated: Use versionStatus.desired.version instead. - // +kubebuilder:validation:Optional - Version string `json:"version,omitempty"` - - // ReleaseImage is the release image applied to the hosted control plane. - // - // Deprecated: Use versionStatus.desired.image instead. - // +optional - ReleaseImage string `json:"releaseImage,omitempty"` - - // lastReleaseImageTransitionTime is the time of the last update to the current - // releaseImage property. - // - // Deprecated: Use versionStatus.history[0].startedTime instead. 
- // +kubebuilder:validation:Optional - LastReleaseImageTransitionTime *metav1.Time `json:"lastReleaseImageTransitionTime,omitempty"` - - // KubeConfig is a reference to the secret containing the default kubeconfig - // for this control plane. - KubeConfig *KubeconfigSecretRef `json:"kubeConfig,omitempty"` - - // KubeadminPassword is a reference to the secret containing the initial kubeadmin password - // for the guest cluster. - // +optional - KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"` - - // Condition contains details for one aspect of the current state of the HostedControlPlane. - // Current condition types are: "Available" - // +optional - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty"` - - // Platform contains platform-specific status of the HostedCluster - // +optional - Platform *PlatformStatus `json:"platform,omitempty"` - - // +optional - - // NodeCount tracks the number of nodes in the HostedControlPlane. - NodeCount *int `json:"nodeCount,omitempty"` -} - -type APIEndpoint struct { - // Host is the hostname on which the API server is serving. - Host string `json:"host"` - - // Port is the port on which the API server is serving. - Port int32 `json:"port"` -} - -// +kubebuilder:object:root=true -// HostedControlPlaneList contains a list of HostedControlPlanes. -type HostedControlPlaneList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []HostedControlPlane `json:"items"` -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hostedcluster_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hostedcluster_types.go deleted file mode 100644 index 3f7adbef5..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/hostedcluster_types.go +++ /dev/null @@ -1,2222 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - configv1 "github.com/openshift/api/config/v1" - - "github.com/openshift/hypershift/api/util/ipnet" -) - -func init() { - SchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HostedCluster{}, - &HostedClusterList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil - }) -} - -const ( - // AuditWebhookKubeconfigKey is the key name in the AuditWebhook secret that stores audit webhook kubeconfig - AuditWebhookKubeconfigKey = "webhook-kubeconfig" - DisablePKIReconciliationAnnotation = "hypershift.openshift.io/disable-pki-reconciliation" - SkipReleaseImageValidation = "hypershift.openshift.io/skip-release-image-validation" - IdentityProviderOverridesAnnotationPrefix = "idpoverrides.hypershift.openshift.io/" - OauthLoginURLOverrideAnnotation = "oauth.hypershift.openshift.io/login-url-override" - // KonnectivityServerImageAnnotation is a temporary annotation that allows the specification of the konnectivity server image. - // This will be removed when Konnectivity is added to the Openshift release payload - KonnectivityServerImageAnnotation = "hypershift.openshift.io/konnectivity-server-image" - // KonnectivityAgentImageAnnotation is a temporary annotation that allows the specification of the konnectivity agent image. 
- // This will be removed when Konnectivity is added to the Openshift release payload - KonnectivityAgentImageAnnotation = "hypershift.openshift.io/konnectivity-agent-image" - // ControlPlaneOperatorImageAnnotation is a annotation that allows the specification of the control plane operator image. - // This is used for development and e2e workflows - ControlPlaneOperatorImageAnnotation = "hypershift.openshift.io/control-plane-operator-image" - // RestartDateAnnotation is a annotation that can be used to trigger a rolling restart of all components managed by hypershift. - // it is important in some situations like CA rotation where components need to be fully restarted to pick up new CAs. It's also - // important in some recovery situations where a fresh start of the component helps fix symptoms a user might be experiencing. - RestartDateAnnotation = "hypershift.openshift.io/restart-date" - // ReleaseImageAnnotation is an annotation that can be used to see what release image a given deployment is tied to - ReleaseImageAnnotation = "hypershift.openshift.io/release-image" - // ClusterAPIManagerImage is an annotation that allows the specification of the cluster api manager image. - // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side: - // no images can be pulled from registries outside of IBM Cloud's official regional registries - ClusterAPIManagerImage = "hypershift.openshift.io/capi-manager-image" - // ClusterAutoscalerImage is an annotation that allows the specification of the cluster autoscaler image. - // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side: - // no images can be pulled from registries outside of IBM Cloud's official regional registries - ClusterAutoscalerImage = "hypershift.openshift.io/cluster-autoscaler-image" - // AWSKMSProviderImage is an annotation that allows the specification of the AWS kms provider image. - // Upstream code located at: https://github.com/kubernetes-sigs/aws-encryption-provider - AWSKMSProviderImage = "hypershift.openshift.io/aws-kms-provider-image" - // IBMCloudKMSProviderImage is an annotation that allows the specification of the IBM Cloud kms provider image. - IBMCloudKMSProviderImage = "hypershift.openshift.io/ibmcloud-kms-provider-image" - // PortierisImageAnnotation is an annotation that allows the specification of the portieries component - // (performs container image verification). - PortierisImageAnnotation = "hypershift.openshift.io/portieris-image" - // Configure ingress controller with endpoint publishing strategy as Private. - // This overrides any opinionated strategy set by platform in ReconcileDefaultIngressController. - // It's used by IBM cloud to support ingress endpoint publishing strategy scope - // NOTE: We'll expose this in the API if the use case gets generalised. - PrivateIngressControllerAnnotation = "hypershift.openshift.io/private-ingress-controller" - - // ClusterAPIProviderAWSImage overrides the CAPI AWS provider image to use for - // a HostedControlPlane. - ClusterAPIProviderAWSImage = "hypershift.openshift.io/capi-provider-aws-image" - - // ClusterAPIKubeVirtProviderImage overrides the CAPI KubeVirt provider image to use for - // a HostedControlPlane. - ClusterAPIKubeVirtProviderImage = "hypershift.openshift.io/capi-provider-kubevirt-image" - - // ClusterAPIAgentProviderImage overrides the CAPI Agent provider image to use for - // a HostedControlPlane. 
- ClusterAPIAgentProviderImage = "hypershift.openshift.io/capi-provider-agent-image" - - // ClusterAPIAzureProviderImage overrides the CAPI Azure provider image to use for - // a HostedControlPlane. - ClusterAPIAzureProviderImage = "hypershift.openshift.io/capi-provider-azure-image" - - // ClusterAPIPowerVSProviderImage overrides the CAPI PowerVS provider image to use for - // a HostedControlPlane. - ClusterAPIPowerVSProviderImage = "hypershift.openshift.io/capi-provider-powervs-image" - - // AESCBCKeySecretKey defines the Kubernetes secret key name that contains the aescbc encryption key - // in the AESCBC secret encryption strategy - AESCBCKeySecretKey = "key" - // IBMCloudIAMAPIKeySecretKey defines the Kubernetes secret key name that contains - // the customer IBMCloud apikey in the unmanaged authentication strategy for IBMCloud KMS secret encryption - IBMCloudIAMAPIKeySecretKey = "iam_apikey" - // AWSCredentialsFileSecretKey defines the Kubernetes secret key name that contains - // the customer AWS credentials in the unmanaged authentication strategy for AWS KMS secret encryption - AWSCredentialsFileSecretKey = "credentials" - - // ControlPlaneComponent identifies a resource as belonging to a hosted control plane. - ControlPlaneComponent = "hypershift.openshift.io/control-plane-component" - - // OperatorComponent identifies a component as belonging to the operator. - OperatorComponent = "hypershift.openshift.io/operator-component" - // MachineApproverImage is an annotation that allows the specification of the machine approver image. - // This is a temporary workaround necessary for compliance reasons on the IBM Cloud side: - // no images can be pulled from registries outside of IBM Cloud's official regional registries - MachineApproverImage = "hypershift.openshift.io/machine-approver-image" - - // ExternalDNSHostnameAnnotation is the annotation external-dns uses to register DNS name for different HCP services. - ExternalDNSHostnameAnnotation = "external-dns.alpha.kubernetes.io/hostname" - - // ForceUpgradeToAnnotation is the annotation that forces HostedCluster upgrade even if the underlying ClusterVersion - // is reporting it is not Upgradeable. The annotation value must be set to the release image being forced. - ForceUpgradeToAnnotation = "hypershift.openshift.io/force-upgrade-to" - - // ServiceAccountSigningKeySecretKey is the name of the secret key that should contain the service account signing - // key if specified. - ServiceAccountSigningKeySecretKey = "key" - - // DisableProfilingAnnotation is the annotation that allows disabling profiling for control plane components. - // Any components specified in this list will have profiling disabled. Profiling is disabled by default for etcd and konnectivity. - // Components this annotation can apply to: kube-scheduler, kube-controller-manager, kube-apiserver. - DisableProfilingAnnotation = "hypershift.openshift.io/disable-profiling" - - // CleanupCloudResourcesAnnotation is an annotation that indicates whether a guest cluster's resources should be - // removed when deleting the corresponding HostedCluster. If set to "true", resources created on the cloud provider during the life - // of the cluster will be removed, including image registry storage, ingress dns records, load balancers, and persistent storage. 
- CleanupCloudResourcesAnnotation = "hypershift.openshift.io/cleanup-cloud-resources" - - // ResourceRequestOverrideAnnotationPrefix is a prefix for an annotation to override resource requests for a particular deployment/container - // in a hosted control plane. The format of the annotation is: - // resource-request-override.hypershift.openshift.io/[deployment-name].[container-name]: [resource-type-1]=[value1],[resource-type-2]=[value2],... - // For example, to override the memory and cpu request for the Kubernetes APIServer: - // resource-request-override.hypershift.openshift.io/kube-apiserver.kube-apiserver: memory=3Gi,cpu=2000m - ResourceRequestOverrideAnnotationPrefix = "resource-request-override.hypershift.openshift.io" -) - -// HostedClusterSpec is the desired behavior of a HostedCluster. -type HostedClusterSpec struct { - // Release specifies the desired OCP release payload for the hosted cluster. - // - // Updating this field will trigger a rollout of the control plane. The - // behavior of the rollout will be driven by the ControllerAvailabilityPolicy - // and InfrastructureAvailabilityPolicy. - Release Release `json:"release"` - - // ControlPlaneRelease specifies the desired OCP release payload for - // control plane components running on the management cluster. - // Updating this field will trigger a rollout of the control plane. The - // behavior of the rollout will be driven by the ControllerAvailabilityPolicy - // and InfrastructureAvailabilityPolicy. - // If not defined, Release is used - // +optional - ControlPlaneRelease *Release `json:"controlPlaneRelease,omitempty"` - - // ClusterID uniquely identifies this cluster. This is expected to be - // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in - // hexadecimal values). - // As with a Kubernetes metadata.uid, this ID uniquely identifies this - // cluster in space and time. - // This value identifies the cluster in metrics pushed to telemetry and - // metrics produced by the control plane operators. If a value is not - // specified, an ID is generated. After initial creation, the value is - // immutable. - // +kubebuilder:validation:Pattern:="[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" - // +optional - ClusterID string `json:"clusterID,omitempty"` - - // updateService may be used to specify the preferred upstream update service. - // By default it will use the appropriate update service for the cluster and region. - // - // +optional - UpdateService configv1.URL `json:"updateService,omitempty"` - - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be - // contain stable updates that are appropriate for production clusters. - // - // +optional - Channel string `json:"channel,omitempty"` - - // InfraID is a globally unique identifier for the cluster. This identifier - // will be used to associate various cloud resources with the HostedCluster - // and its associated NodePools. - // - // +optional - // +immutable - InfraID string `json:"infraID,omitempty"` - - // Platform specifies the underlying infrastructure provider for the cluster - // and is used to configure platform specific behavior. - // - // +immutable - Platform PlatformSpec `json:"platform"` - - // ControllerAvailabilityPolicy specifies the availability policy applied to - // critical control plane components. The default value is HighlyAvailable. 
- // - // +optional - // +kubebuilder:default:="HighlyAvailable" - // +immutable - ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - - // InfrastructureAvailabilityPolicy specifies the availability policy applied - // to infrastructure services which run on cluster nodes. The default value is - // SingleReplica. - // - // +optional - // +kubebuilder:default:="SingleReplica" - // +immutable - InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - - // DNS specifies DNS configuration for the cluster. - // - // +immutable - DNS DNSSpec `json:"dns,omitempty"` - - // Networking specifies network configuration for the cluster. - // - // +immutable - // +kubebuilder:default={networkType: "OVNKubernetes", clusterNetwork: {{cidr: "10.132.0.0/14"}}, serviceNetwork: {{cidr: "172.31.0.0/16"}}} - Networking ClusterNetworking `json:"networking"` - - // Autoscaling specifies auto-scaling behavior that applies to all NodePools - // associated with the control plane. - // - // +optional - Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"` - - // Etcd specifies configuration for the control plane etcd cluster. The - // default ManagementType is Managed. Once set, the ManagementType cannot be - // changed. - // - // +kubebuilder:validation:Optional - // +kubebuilder:default={managementType: "Managed", managed: {storage: {type: "PersistentVolume", persistentVolume: {size: "4Gi"}}}} - // +immutable - Etcd EtcdSpec `json:"etcd"` - - // Services specifies how individual control plane services are published from - // the hosting cluster of the control plane. - // - // If a given service is not present in this list, it will be exposed publicly - // by default. - Services []ServicePublishingStrategyMapping `json:"services"` - - // PullSecret references a pull secret to be injected into the container - // runtime of all cluster nodes. The secret must have a key named - // ".dockerconfigjson" whose value is the pull secret JSON. - // - // +immutable - PullSecret corev1.LocalObjectReference `json:"pullSecret"` - - // SSHKey references an SSH key to be injected into all cluster node sshd - // servers. The secret must have a single key "id_rsa.pub" whose value is the - // public part of an SSH key. - // - // +immutable - SSHKey corev1.LocalObjectReference `json:"sshKey"` - - // IssuerURL is an OIDC issuer URL which is used as the issuer in all - // ServiceAccount tokens generated by the control plane API server. The - // default value is kubernetes.default.svc, which only works for in-cluster - // validation. - // - // +kubebuilder:default:="https://kubernetes.default.svc" - // +immutable - // +optional - // +kubebuilder:validation:Format=uri - IssuerURL string `json:"issuerURL,omitempty"` - - // ServiceAccountSigningKey is a reference to a secret containing the private key - // used by the service account token issuer. The secret is expected to contain - // a single key named "key". If not specified, a service account signing key will - // be generated automatically for the cluster. When specifying a service account - // signing key, a IssuerURL must also be specified. 
- //
- // +immutable
- // +kubebuilder:validation:Optional
- // +optional
- ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"`
-
- // Configuration specifies configuration for individual OCP components in the
- // cluster, represented as embedded resources that correspond to the openshift
- // configuration API.
- //
- // +kubebuilder:validation:Optional
- // +optional
- Configuration *ClusterConfiguration `json:"configuration,omitempty"`
-
- // AuditWebhook contains metadata for configuring an audit webhook endpoint
- // for a cluster to process cluster audit events. It references a secret that
- // contains the webhook information for the audit webhook endpoint. It is a
- // secret because if the endpoint has mTLS the kubeconfig will contain client
- // keys. The kubeconfig needs to be stored in the secret with a secret key
- // name that corresponds to the constant AuditWebhookKubeconfigKey.
- //
- // +optional
- // +immutable
- AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"`
-
- // ImageContentSources specifies image mirrors that can be used by cluster
- // nodes to pull content.
- //
- // +optional
- // +immutable
- ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"`
-
- // AdditionalTrustBundle is a reference to a ConfigMap containing a
- // PEM-encoded X.509 certificate bundle that will be added to the hosted controlplane and nodes
- //
- // +optional
- AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"`
-
- // SecretEncryption specifies a Kubernetes secret encryption strategy for the
- // control plane.
- //
- // +optional
- SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"`
-
- // FIPS indicates whether this cluster's nodes will be running in FIPS mode.
- // If set to true, the control plane's ignition server will be configured to
- // expect that nodes joining the cluster will be FIPS-enabled.
- //
- // +optional
- // +immutable
- FIPS bool `json:"fips"`
-
- // PausedUntil is a field that can be used to pause reconciliation on a resource.
- // Either a date can be provided in RFC3339 format or a boolean. If a date is
- // provided: reconciliation is paused on the resource until that date. If the boolean true is
- // provided: reconciliation is paused on the resource until the field is removed.
- // +optional
- PausedUntil *string `json:"pausedUntil,omitempty"`
-
- // OLMCatalogPlacement specifies the placement of OLM catalog components. By default,
- // this is set to management and OLM catalog components are deployed onto the management
- // cluster. If set to guest, the OLM catalog components will be deployed onto the guest
- // cluster.
- //
- // +kubebuilder:default=management
- // +optional
- // +immutable
- OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"`
-
- // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled.
- //
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Tolerations when specified, define what custom tolerations are added to the hcp pods.
- //
- // +optional
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-// OLMCatalogPlacement is an enum specifying the placement of OLM catalog components.
-// +kubebuilder:validation:Enum=management;guest
-type OLMCatalogPlacement string
-
-const (
- // ManagementOLMCatalogPlacement indicates OLM catalog components will be placed in
- // the management cluster.
- ManagementOLMCatalogPlacement OLMCatalogPlacement = "management"
-
- // GuestOLMCatalogPlacement indicates OLM catalog components will be placed in
- // the guest cluster.
- GuestOLMCatalogPlacement OLMCatalogPlacement = "guest"
-)
-
-// ImageContentSource specifies image mirrors that can be used by cluster nodes
-// to pull content. For cluster workloads, if a container image registry host of
-// the pullspec matches Source then one of the Mirrors is substituted as host
-// in the pullspec and tried in order to fetch the image.
-type ImageContentSource struct {
- // Source is the repository that users refer to, e.g. in image pull
- // specifications.
- //
- // +immutable
- Source string `json:"source"`
-
- // Mirrors are one or more repositories that may also contain the same images.
- //
- // +optional
- // +immutable
- Mirrors []string `json:"mirrors,omitempty"`
-}
-
-// ServicePublishingStrategyMapping specifies how individual control plane
-// services are published from the hosting cluster of a control plane.
-type ServicePublishingStrategyMapping struct {
- // Service identifies the type of service being published.
- //
- // +kubebuilder:validation:Enum=APIServer;OAuthServer;OIDC;Konnectivity;Ignition;OVNSbDb
- // +immutable
- Service ServiceType `json:"service"`
-
- // ServicePublishingStrategy specifies how to publish Service.
- ServicePublishingStrategy `json:"servicePublishingStrategy"`
-}
-
-// ServicePublishingStrategy specifies how to publish a ServiceType.
-type ServicePublishingStrategy struct {
- // Type is the publishing strategy used for the service.
- //
- // +kubebuilder:validation:Enum=LoadBalancer;NodePort;Route;None;S3
- // +immutable
- Type PublishingStrategyType `json:"type"`
-
- // NodePort configures exposing a service using a NodePort.
- NodePort *NodePortPublishingStrategy `json:"nodePort,omitempty"`
-
- // LoadBalancer configures exposing a service using a LoadBalancer.
- LoadBalancer *LoadBalancerPublishingStrategy `json:"loadBalancer,omitempty"`
-
- // Route configures exposing a service using a Route.
- Route *RoutePublishingStrategy `json:"route,omitempty"`
-}
-
-// PublishingStrategyType defines publishing strategies for services.
-type PublishingStrategyType string
-
-var (
- // LoadBalancer exposes a service with a LoadBalancer kube service.
- LoadBalancer PublishingStrategyType = "LoadBalancer"
- // NodePort exposes a service with a NodePort kube service.
- NodePort PublishingStrategyType = "NodePort"
- // Route exposes services with a Route + ClusterIP kube service.
- Route PublishingStrategyType = "Route"
- // S3 exposes a service through an S3 bucket
- S3 PublishingStrategyType = "S3"
- // None disables exposing the service
- None PublishingStrategyType = "None"
-)
-
-// ServiceType defines what control plane services can be exposed from the
-// management control plane.
-type ServiceType string
-
-var (
- // APIServer is the control plane API server.
- APIServer ServiceType = "APIServer"
-
- // Konnectivity is the control plane Konnectivity networking service.
- Konnectivity ServiceType = "Konnectivity"
-
- // OAuthServer is the control plane OAuth service.
- OAuthServer ServiceType = "OAuthServer"
-
- // OIDC is the control plane OIDC service.
- OIDC ServiceType = "OIDC"
-
- // Ignition is the control plane ignition service for nodes.
- Ignition ServiceType = "Ignition" - - // OVNSbDb is the optional control plane ovn southbound database service used by OVNKubernetes CNI. - OVNSbDb ServiceType = "OVNSbDb" -) - -// NodePortPublishingStrategy specifies a NodePort used to expose a service. -type NodePortPublishingStrategy struct { - // Address is the host/ip that the NodePort service is exposed over. - Address string `json:"address"` - - // Port is the port of the NodePort service. If <=0, the port is dynamically - // assigned when the service is created. - Port int32 `json:"port,omitempty"` -} - -// LoadBalancerPublishingStrategy specifies setting used to expose a service as a LoadBalancer. -type LoadBalancerPublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the LoadBalancer. - // +optional - Hostname string `json:"hostname,omitempty"` -} - -// RoutePublishingStrategy specifies options for exposing a service as a Route. -type RoutePublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the Route. - // +optional - Hostname string `json:"hostname,omitempty"` -} - -// DNSSpec specifies the DNS configuration in the cluster. -type DNSSpec struct { - // BaseDomain is the base domain of the cluster. - // - // +immutable - BaseDomain string `json:"baseDomain"` - - // BaseDomainPrefix is the base domain prefix of the cluster. - // defaults to clusterName if not set - // - // +optional - // +immutable - BaseDomainPrefix *string `json:"baseDomainPrefix,omitempty"` - - // PublicZoneID is the Hosted Zone ID where all the DNS records that are - // publicly accessible to the internet exist. - // - // +optional - // +immutable - PublicZoneID string `json:"publicZoneID,omitempty"` - - // PrivateZoneID is the Hosted Zone ID where all the DNS records that are only - // available internally to the cluster exist. - // - // +optional - // +immutable - PrivateZoneID string `json:"privateZoneID,omitempty"` -} - -// ClusterNetworking specifies network configuration for a cluster. -type ClusterNetworking struct { - // Deprecated - // This field will be removed in the next API release. - // Use ServiceNetwork instead - // +immutable - // +optional - // +kubebuilder:validation:Format=cidr - ServiceCIDR string `json:"serviceCIDR,omitempty"` - - // Deprecated - // This field will be removed in the next API release. - // Use ClusterNetwork instead - // - // +immutable - // +optional - // +kubebuilder:validation:Format=cidr - PodCIDR string `json:"podCIDR,omitempty"` - - // Deprecated - // This field will be removed in the next API release. - // Use MachineNetwork instead - // +immutable - // +optional - // +kubebuilder:validation:Format=cidr - MachineCIDR string `json:"machineCIDR,omitempty"` - - // MachineNetwork is the list of IP address pools for machines. - // TODO: make this required in the next version of the API - // - // +immutable - // +optional - MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"` - - // ClusterNetwork is the list of IP address pools for pods. - // TODO: make this required in the next version of the API - // - // +immutable - // +optional - // +kubebuilder:default:={{cidr: "10.132.0.0/14"}} - ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` - - // ServiceNetwork is the list of IP address pools for services. - // NOTE: currently only one entry is supported. 
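Taken together, the Services mapping and publishing strategy types removed above drive how each control plane endpoint is exposed. The following is a minimal sketch only, not part of the vendored file: the hyperv1 package alias and the hostname are assumptions, while the type, field, and constant names come from the definitions above.

	// Illustrative only: expose the APIServer via a LoadBalancer and Ignition via a Route.
	var services = []hyperv1.ServicePublishingStrategyMapping{
		{
			Service:                   hyperv1.APIServer,
			ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{Type: hyperv1.LoadBalancer},
		},
		{
			Service: hyperv1.Ignition,
			ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{
				Type:  hyperv1.Route,
				Route: &hyperv1.RoutePublishingStrategy{Hostname: "ignition.example.com"}, // assumed hostname
			},
		},
	}

Any service omitted from such a list is exposed publicly by default, per the Services field documentation above.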
- // TODO: make this required in the next version of the API - // - // +immutable - // +optional - // +kubebuilder:default:={{cidr: "172.31.0.0/16"}} - ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork,omitempty"` - - // NetworkType specifies the SDN provider used for cluster networking. - // - // +kubebuilder:default:="OVNKubernetes" - // +immutable - NetworkType NetworkType `json:"networkType"` - - // APIServer contains advanced network settings for the API server that affect - // how the APIServer is exposed inside a cluster node. - // - // +immutable - APIServer *APIServerNetworking `json:"apiServer,omitempty"` -} - -// MachineNetworkEntry is a single IP address block for node IP blocks. -type MachineNetworkEntry struct { - // CIDR is the IP block address pool for machines within the cluster. - CIDR ipnet.IPNet `json:"cidr"` -} - -// ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks -// are allocated with size 2^HostSubnetLength. -type ClusterNetworkEntry struct { - // CIDR is the IP block address pool. - CIDR ipnet.IPNet `json:"cidr"` - - // HostPrefix is the prefix size to allocate to each node from the CIDR. - // For example, 24 would allocate 2^8=256 adresses to each node. If this - // field is not used by the plugin, it can be left unset. - // +optional - HostPrefix int32 `json:"hostPrefix,omitempty"` -} - -// ServiceNetworkEntry is a single IP address block for the service network. -type ServiceNetworkEntry struct { - // CIDR is the IP block address pool for services within the cluster. - CIDR ipnet.IPNet `json:"cidr"` -} - -// +kubebuilder:validation:Pattern:=`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$` -type CIDRBlock string - -// APIServerNetworking specifies how the APIServer is exposed inside a cluster -// node. -type APIServerNetworking struct { - // AdvertiseAddress is the address that nodes will use to talk to the API - // server. This is an address associated with the loopback adapter of each - // node. If not specified, 172.20.0.1 is used. - AdvertiseAddress *string `json:"advertiseAddress,omitempty"` - - // Port is the port at which the APIServer is exposed inside a node. Other - // pods using host networking cannot listen on this port. If not specified, - // 6443 is used. - Port *int32 `json:"port,omitempty"` - - // AllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer - // If not specified, traffic is allowed from all addresses. - // This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges - AllowedCIDRBlocks []CIDRBlock `json:"allowedCIDRBlocks,omitempty"` -} - -// NetworkType specifies the SDN provider used for cluster networking. -// -// +kubebuilder:validation:Enum=OpenShiftSDN;Calico;OVNKubernetes;Other -type NetworkType string - -const ( - // OpenShiftSDN specifies OpenShiftSDN as the SDN provider - OpenShiftSDN NetworkType = "OpenShiftSDN" - - // Calico specifies Calico as the SDN provider - Calico NetworkType = "Calico" - - // OVNKubernetes specifies OVN as the SDN provider - OVNKubernetes NetworkType = "OVNKubernetes" - - // Other specifies an undefined SDN provider - Other NetworkType = "Other" -) - -// PlatformType is a specific supported infrastructure provider. -// -// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent;KubeVirt;Azure;PowerVS -type PlatformType string - -const ( - // AWSPlatform represents Amazon Web Services infrastructure. 
- AWSPlatform PlatformType = "AWS" - - // NonePlatform represents user supplied (e.g. bare metal) infrastructure. - NonePlatform PlatformType = "None" - - // IBMCloudPlatform represents IBM Cloud infrastructure. - IBMCloudPlatform PlatformType = "IBMCloud" - - // AgentPlatform represents user supplied insfrastructure booted with agents. - AgentPlatform PlatformType = "Agent" - - // KubevirtPlatform represents Kubevirt infrastructure. - KubevirtPlatform PlatformType = "KubeVirt" - - // AzurePlatform represents Azure infrastructure. - AzurePlatform PlatformType = "Azure" - - // PowerVSPlatform represents PowerVS infrastructure. - PowerVSPlatform PlatformType = "PowerVS" -) - -// PlatformSpec specifies the underlying infrastructure provider for the cluster -// and is used to configure platform specific behavior. -type PlatformSpec struct { - // Type is the type of infrastructure provider for the cluster. - // - // +unionDiscriminator - // +immutable - Type PlatformType `json:"type"` - - // AWS specifies configuration for clusters running on Amazon Web Services. - // - // +optional - // +immutable - AWS *AWSPlatformSpec `json:"aws,omitempty"` - - // Agent specifies configuration for agent-based installations. - // - // +optional - // +immutable - Agent *AgentPlatformSpec `json:"agent,omitempty"` - - // IBMCloud defines IBMCloud specific settings for components - IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - - // Azure defines azure specific settings - Azure *AzurePlatformSpec `json:"azure,omitempty"` - - // PowerVS specifies configuration for clusters running on IBMCloud Power VS Service. - // This field is immutable. Once set, It can't be changed. - // - // +optional - // +immutable - PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - - // KubeVirt defines KubeVirt specific settings for cluster components. - // - // +optional - // +immutable - Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` -} - -type KubevirtPlatformCredentials struct { - // InfraKubeConfigSecret is a reference to a secret that contains the kubeconfig for the external infra cluster - // that will be used to host the KubeVirt virtual machines for this cluster. - // - // +immutable - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraKubeConfigSecret is immutable" - InfraKubeConfigSecret *KubeconfigSecretRef `json:"infraKubeConfigSecret,omitempty"` - - // InfraNamespace defines the namespace on the external infra cluster that is used to host the KubeVirt - // virtual machines. This namespace must already exist before creating the HostedCluster and the kubeconfig - // referenced in the InfraKubeConfigSecret must have access to manage the required resources within this - // namespace. - // - // +immutable - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraNamespace is immutable" - InfraNamespace string `json:"infraNamespace"` -} - -// KubevirtPlatformSpec specifies configuration for kubevirt guest cluster installations -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.generateID) || has(self.generateID)", message="Kubevirt GenerateID is required once set" -type KubevirtPlatformSpec struct { - // BaseDomainPassthrough toggles whether or not an automatically - // generated base domain for the guest cluster should be used that - // is a subdomain of the management cluster's *.apps DNS. 
- // - // For the KubeVirt platform, the basedomain can be autogenerated using - // the *.apps domain of the management/infra hosting cluster - // This makes the guest cluster's base domain a subdomain of the - // hypershift infra/mgmt cluster's base domain. - // - // Example: - // Infra/Mgmt cluster's DNS - // Base: example.com - // Cluster: mgmt-cluster.example.com - // Apps: *.apps.mgmt-cluster.example.com - // KubeVirt Guest cluster's DNS - // Base: apps.mgmt-cluster.example.com - // Cluster: guest.apps.mgmt-cluster.example.com - // Apps: *.apps.guest.apps.mgmt-cluster.example.com - // - // This is possible using OCP wildcard routes - // - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPassthrough is immutable" - BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"` - - // GenerateID is used to uniquely apply a name suffix to resources associated with - // kubevirt infrastructure resources - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Kubevirt GenerateID is immutable once set" - // +kubebuilder:validation:MaxLength=11 - // +optional - GenerateID string `json:"generateID,omitempty"` - // Credentials defines the client credentials used when creating KubeVirt virtual machines. - // Defining credentials is only necessary when the KubeVirt virtual machines are being placed - // on a cluster separate from the one hosting the Hosted Control Plane components. - // - // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on - // the same cluster and namespace as the Hosted Control Plane. - // +optional - Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` - - // StorageDriver defines how the KubeVirt CSI driver exposes StorageClasses on - // the infra cluster (hosting the VMs) to the guest cluster. - // - // +kubebuilder:validation:Optional - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver is immutable" - StorageDriver *KubevirtStorageDriverSpec `json:"storageDriver,omitempty"` -} - -// KubevirtStorageDriverConfigType defines how the kubevirt storage driver is configured. -// -// +kubebuilder:validation:Enum=None;Default;Manual -type KubevirtStorageDriverConfigType string - -const ( - // NoneKubevirtStorageDriverConfigType means no kubevirt storage driver is used - NoneKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "None" - - // DefaultKubevirtStorageDriverConfigType means the kubevirt storage driver maps to the - // underlying infra cluster's default storageclass - DefaultKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Default" - - // ManualKubevirtStorageDriverConfigType means the kubevirt storage driver mapping is - // explicitly defined. 
- ManualKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Manual"
-)
-
-type KubevirtStorageDriverSpec struct {
- // Type represents the type of kubevirt csi driver configuration to use
- //
- // +unionDiscriminator
- // +immutable
- // +kubebuilder:default=Default
- // +optional
- // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Type is immutable"
- Type KubevirtStorageDriverConfigType `json:"type,omitempty"`
-
- // Manual is used to explicitly define how the infra storageclasses are
- // mapped to guest storageclasses
- //
- // +immutable
- // +optional
- // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Manual is immutable"
- Manual *KubevirtManualStorageDriverConfig `json:"manual,omitempty"`
-}
-
-type KubevirtManualStorageDriverConfig struct {
- // StorageClassMapping maps StorageClasses on the infra cluster hosting
- // the KubeVirt VMs to StorageClasses that are made available within the
- // Guest Cluster.
- //
- // NOTE: It is possible that not all capabilities of an infra cluster's
- // storageclass will be present for the corresponding guest cluster's storageclass.
- //
- // +optional
- // +immutable
- // +kubebuilder:validation:Optional
- // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassMapping is immutable"
- StorageClassMapping []KubevirtStorageClassMapping `json:"storageClassMapping,omitempty"`
-
- // +optional
- // +immutable
- // +kubebuilder:validation:Optional
- // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassMapping is immutable"
- VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMapping `json:"volumeSnapshotClassMapping,omitempty"`
-}
-
-type KubevirtStorageClassMapping struct {
- // Group contains which group this mapping belongs to.
- Group string `json:"group,omitempty"`
- // InfraStorageClassName is the name of the infra cluster storage class that
- // will be exposed to the guest.
- InfraStorageClassName string `json:"infraStorageClassName"`
- // GuestStorageClassName is the name that the corresponding storageclass will
- // be called within the guest cluster
- GuestStorageClassName string `json:"guestStorageClassName"`
-}
-
-type KubevirtVolumeSnapshotClassMapping struct {
- // Group contains which group this mapping belongs to.
- Group string `json:"group,omitempty"`
- // InfraVolumeSnapshotClassName is the name of the infra cluster volume snapshot class that
- // will be exposed to the guest.
- InfraVolumeSnapshotClassName string `json:"infraVolumeSnapshotClassName"`
- // GuestVolumeSnapshotClassName is the name that the corresponding volumeSnapshotClass will
- // be called within the guest cluster
- GuestVolumeSnapshotClassName string `json:"guestVolumeSnapshotClassName"`
-}
-
-// AgentPlatformSpec specifies configuration for agent-based installations.
-type AgentPlatformSpec struct {
- // AgentNamespace is the namespace in which to search for Agents for this cluster
- AgentNamespace string `json:"agentNamespace"`
-}
-
-// IBMCloudPlatformSpec defines IBMCloud specific settings for components
-type IBMCloudPlatformSpec struct {
- // ProviderType is a specific supported infrastructure provider within IBM Cloud.
- ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"`
-}
-
-// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components
-type PowerVSPlatformSpec struct {
- // AccountID is the IBMCloud account id.
- // This field is immutable.
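As a rough sketch of the Manual storage driver mapping described by the KubeVirt types above (again not part of the vendored file; the hyperv1 alias and both storage class names are illustrative assumptions):

	// Illustrative only: map one infra StorageClass to the name surfaced in the guest cluster.
	var storageDriver = &hyperv1.KubevirtStorageDriverSpec{
		Type: hyperv1.ManualKubevirtStorageDriverConfigType,
		Manual: &hyperv1.KubevirtManualStorageDriverConfig{
			StorageClassMapping: []hyperv1.KubevirtStorageClassMapping{
				{InfraStorageClassName: "infra-rbd", GuestStorageClassName: "guest-rbd"},
			},
		},
	}

Leaving Type at its Default value instead maps the guest driver to the infra cluster's default storage class, per the constants above.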
Once set, It can't be changed. - // - // +immutable - AccountID string `json:"accountID"` - - // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name - // This field is immutable. Once set, It can't be changed. - // - // +kubebuilder:validation:Pattern=`^crn:` - // +immutable - CISInstanceCRN string `json:"cisInstanceCRN"` - - // ResourceGroup is the IBMCloud Resource Group in which the cluster resides. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - ResourceGroup string `json:"resourceGroup"` - - // Region is the IBMCloud region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot image for a given release. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Region string `json:"region"` - - // Zone is the availability zone where control plane cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Zone string `json:"zone"` - - // Subnet is the subnet to use for control plane cloud resources. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Subnet *PowerVSResourceReference `json:"subnet"` - - // ServiceInstance is the reference to the Power VS service on which the server instance(VM) will be created. - // Power VS service is a container for all Power VS instances at a specific geographic region. - // serviceInstance can be created via IBM Cloud catalog or CLI. - // ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud UI or IBM Cloud cli. - // - // More detail about Power VS service instance. - // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server - // - // This field is immutable. Once set, It can't be changed. - // - // +immutable - ServiceInstanceID string `json:"serviceInstanceID"` - - // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control - // plane. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - VPC *PowerVSVPC `json:"vpc"` - - // KubeCloudControllerCreds is a reference to a secret containing cloud - // credentials with permissions matching the cloud controller policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "cloud controller policy" - // - // +immutable - KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"` - - // NodePoolManagementCreds is a reference to a secret containing cloud - // credentials with permissions matching the node pool management policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "node pool management policy" - // - // +immutable - NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"` - - // IngressOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for ingress operator to get authenticated with ibm cloud. - // - // +immutable - IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"` - - // StorageOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for storage operator to get authenticated with ibm cloud. 
- // - // +immutable - StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"` - - // ImageRegistryOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for image registry operator to get authenticated with ibm cloud. - // - // +immutable - ImageRegistryOperatorCloudCreds corev1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds"` -} - -// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control -// plane. -type PowerVSVPC struct { - // Name for VPC to used for all the service load balancer. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Name string `json:"name"` - - // Region is the IBMCloud region in which VPC gets created, this VPC used for all the ingress traffic - // into the OCP cluster. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Region string `json:"region"` - - // Zone is the availability zone where load balancer cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - // +optional - Zone string `json:"zone,omitempty"` - - // Subnet is the subnet to use for load balancer. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - // +optional - Subnet string `json:"subnet,omitempty"` -} - -// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name. -// Only one of ID, or Name may be specified. Specifying more than one will result in -// a validation error. -type PowerVSResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - - // Name of resource - // +optional - Name *string `json:"name,omitempty"` -} - -// AWSCloudProviderConfig specifies AWS networking configuration. -type AWSCloudProviderConfig struct { - // Subnet is the subnet to use for control plane cloud resources. - // - // +optional - Subnet *AWSResourceReference `json:"subnet,omitempty"` - - // Zone is the availability zone where control plane cloud resources are - // created. - // - // +optional - Zone string `json:"zone,omitempty"` - - // VPC is the VPC to use for control plane cloud resources. - VPC string `json:"vpc"` -} - -// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. -type AWSEndpointAccessType string - -const ( - // Public endpoint access allows public API server access and public node - // communication with the control plane. - Public AWSEndpointAccessType = "Public" - - // PublicAndPrivate endpoint access allows public API server access and - // private node communication with the control plane. - PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" - - // Private endpoint access allows only private API server access and private - // node communication with the control plane. - Private AWSEndpointAccessType = "Private" -) - -// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. -type AWSPlatformSpec struct { - // Region is the AWS region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot AMI for a given release. - // - // +immutable - Region string `json:"region"` - - // CloudProviderConfig specifies AWS networking configuration for the control - // plane. 
- // This is mainly used for cloud provider controller config: - // https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364 - // TODO(dan): should this be named AWSNetworkConfig? - // - // +optional - // +immutable - CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"` - - // ServiceEndpoints specifies optional custom endpoints which will override - // the default service endpoint of specific AWS Services. - // - // There must be only one ServiceEndpoint for a given service name. - // - // +optional - // +immutable - ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` - - // RolesRef contains references to various AWS IAM roles required to enable - // integrations such as OIDC. - // - // +immutable - RolesRef AWSRolesRef `json:"rolesRef"` - - // Deprecated - // This field will be removed in the next API release. - // Use RolesRef instead. - // +immutable - Roles []AWSRoleCredentials `json:"roles,omitempty"` - - // Deprecated - // This field will be removed in the next API release. - // Use RolesRef instead. - // - // +immutable - KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"` - - // Deprecated - // This field will be removed in the next API release. - // Use RolesRef instead. - // - // +immutable - NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"` - - // Deprecated - // This field will be removed in the next API release. - // Use RolesRef instead. - // - // +immutable - ControlPlaneOperatorCreds corev1.LocalObjectReference `json:"controlPlaneOperatorCreds"` - - // ResourceTags is a list of additional tags to apply to AWS resources created - // for the cluster. See - // https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for - // information on tagging AWS resources. AWS supports a maximum of 50 tags per - // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available - // for the user. - // - // +kubebuilder:validation:MaxItems=25 - // +listType=map - // +listMapKey=key - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` - - // EndpointAccess specifies the publishing scope of cluster endpoints. The - // default is Public. - // - // +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private - // +kubebuilder:default=Public - // +optional - EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"` - - // AdditionalAllowedPrincipals specifies a list of additional allowed principal ARNs - // to be added to the hosted control plane's VPC Endpoint Service to enable additional - // VPC Endpoint connection requests to be automatically accepted. - // See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html - // for more details around VPC Endpoint Service allowed principals. - // - // +optional - AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` - - // MultiArch specifies whether the Hosted Cluster will be expected to support NodePools with different - // CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster. 
- // +kubebuilder:default=false - // +optional - MultiArch bool `json:"multiArch"` -} - -type AWSRoleCredentials struct { - ARN string `json:"arn"` - Namespace string `json:"namespace"` - Name string `json:"name"` -} - -// AWSResourceTag is a tag to apply to AWS resources created for the cluster. -type AWSResourceTag struct { - // Key is the key of the tag. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - Key string `json:"key"` - // Value is the value of the tag. - // - // Some AWS service do not support empty values. Since tags are added to - // resources in many services, the length of the tag value must meet the - // requirements of all services. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - Value string `json:"value"` -} - -// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. -type AWSRolesRef struct { - // The referenced role must have a trust relationship that allows it to be assumed via web identity. - // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. - // Example: - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Principal": { - // "Federated": "{{ .ProviderARN }}" - // }, - // "Action": "sts:AssumeRoleWithWebIdentity", - // "Condition": { - // "StringEquals": { - // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} - // } - // } - // } - // ] - // } - // - // IngressARN is an ARN value referencing a role appropriate for the Ingress Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "elasticloadbalancing:DescribeLoadBalancers", - // "tag:GetResources", - // "route53:ListHostedZones" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets" - // ], - // "Resource": [ - // "arn:aws:route53:::PUBLIC_ZONE_ID", - // "arn:aws:route53:::PRIVATE_ZONE_ID" - // ] - // } - // ] - // } - IngressARN string `json:"ingressARN"` - - // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "s3:CreateBucket", - // "s3:DeleteBucket", - // "s3:PutBucketTagging", - // "s3:GetBucketTagging", - // "s3:PutBucketPublicAccessBlock", - // "s3:GetBucketPublicAccessBlock", - // "s3:PutEncryptionConfiguration", - // "s3:GetEncryptionConfiguration", - // "s3:PutLifecycleConfiguration", - // "s3:GetLifecycleConfiguration", - // "s3:GetBucketLocation", - // "s3:ListBucket", - // "s3:GetObject", - // "s3:PutObject", - // "s3:DeleteObject", - // "s3:ListBucketMultipartUploads", - // "s3:AbortMultipartUpload", - // "s3:ListMultipartUploadParts" - // ], - // "Resource": "*" - // } - // ] - // } - ImageRegistryARN string `json:"imageRegistryARN"` - - // StorageARN is an ARN value referencing a role appropriate for the Storage Operator. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:AttachVolume", - // "ec2:CreateSnapshot", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:DeleteSnapshot", - // "ec2:DeleteTags", - // "ec2:DeleteVolume", - // "ec2:DescribeInstances", - // "ec2:DescribeSnapshots", - // "ec2:DescribeTags", - // "ec2:DescribeVolumes", - // "ec2:DescribeVolumesModifications", - // "ec2:DetachVolume", - // "ec2:ModifyVolume" - // ], - // "Resource": "*" - // } - // ] - // } - StorageARN string `json:"storageARN"` - - // NetworkARN is an ARN value referencing a role appropriate for the Network Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:DescribeInstances", - // "ec2:DescribeInstanceStatus", - // "ec2:DescribeInstanceTypes", - // "ec2:UnassignPrivateIpAddresses", - // "ec2:AssignPrivateIpAddresses", - // "ec2:UnassignIpv6Addresses", - // "ec2:AssignIpv6Addresses", - // "ec2:DescribeSubnets", - // "ec2:DescribeNetworkInterfaces" - // ], - // "Resource": "*" - // } - // ] - // } - NetworkARN string `json:"networkARN"` - - // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "ec2:DescribeInstances", - // "ec2:DescribeImages", - // "ec2:DescribeRegions", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVolumes", - // "ec2:CreateSecurityGroup", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyVolume", - // "ec2:AttachVolume", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateRoute", - // "ec2:DeleteRoute", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteVolume", - // "ec2:DetachVolume", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:DescribeVpcs", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:AttachLoadBalancerToSubnets", - // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancerPolicy", - // "elasticloadbalancing:CreateLoadBalancerListeners", - // "elasticloadbalancing:ConfigureHealthCheck", - // "elasticloadbalancing:DeleteLoadBalancer", - // "elasticloadbalancing:DeleteLoadBalancerListeners", - // "elasticloadbalancing:DescribeLoadBalancers", - // "elasticloadbalancing:DescribeLoadBalancerAttributes", - // "elasticloadbalancing:DetachLoadBalancerFromSubnets", - // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - // "elasticloadbalancing:ModifyLoadBalancerAttributes", - // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:CreateListener", - // "elasticloadbalancing:CreateTargetGroup", - // "elasticloadbalancing:DeleteListener", - // "elasticloadbalancing:DeleteTargetGroup", - // "elasticloadbalancing:DescribeListeners", - // "elasticloadbalancing:DescribeLoadBalancerPolicies", - // "elasticloadbalancing:DescribeTargetGroups", - // "elasticloadbalancing:DescribeTargetHealth", - // "elasticloadbalancing:ModifyListener", - // 
"elasticloadbalancing:ModifyTargetGroup", - // "elasticloadbalancing:RegisterTargets", - // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - // "iam:CreateServiceLinkedRole", - // "kms:DescribeKey" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // } - // ] - // } - // +immutable - KubeCloudControllerARN string `json:"kubeCloudControllerARN"` - - // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "ec2:AllocateAddress", - // "ec2:AssociateRouteTable", - // "ec2:AttachInternetGateway", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateInternetGateway", - // "ec2:CreateNatGateway", - // "ec2:CreateRoute", - // "ec2:CreateRouteTable", - // "ec2:CreateSecurityGroup", - // "ec2:CreateSubnet", - // "ec2:CreateTags", - // "ec2:DeleteInternetGateway", - // "ec2:DeleteNatGateway", - // "ec2:DeleteRouteTable", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteSubnet", - // "ec2:DeleteTags", - // "ec2:DescribeAccountAttributes", - // "ec2:DescribeAddresses", - // "ec2:DescribeAvailabilityZones", - // "ec2:DescribeImages", - // "ec2:DescribeInstances", - // "ec2:DescribeInternetGateways", - // "ec2:DescribeNatGateways", - // "ec2:DescribeNetworkInterfaces", - // "ec2:DescribeNetworkInterfaceAttribute", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVpcs", - // "ec2:DescribeVpcAttribute", - // "ec2:DescribeVolumes", - // "ec2:DetachInternetGateway", - // "ec2:DisassociateRouteTable", - // "ec2:DisassociateAddress", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyNetworkInterfaceAttribute", - // "ec2:ModifySubnetAttribute", - // "ec2:ReleaseAddress", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:RunInstances", - // "ec2:TerminateInstances", - // "tag:GetResources", - // "ec2:CreateLaunchTemplate", - // "ec2:CreateLaunchTemplateVersion", - // "ec2:DescribeLaunchTemplates", - // "ec2:DescribeLaunchTemplateVersions", - // "ec2:DeleteLaunchTemplate", - // "ec2:DeleteLaunchTemplateVersions" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // }, - // { - // "Condition": { - // "StringLike": { - // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" - // } - // }, - // "Action": [ - // "iam:CreateServiceLinkedRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" - // ], - // "Effect": "Allow" - // }, - // { - // "Action": [ - // "iam:PassRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/*-worker-role" - // ], - // "Effect": "Allow" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:Decrypt", - // "kms:Encrypt", - // "kms:GenerateDataKey", - // "kms:GenerateDataKeyWithoutPlainText", - // "kms:DescribeKey" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:RevokeGrant", - // "kms:CreateGrant", - // "kms:ListGrants" - // ], - // "Resource": "*", - // "Condition": { - // "Bool": { - // "kms:GrantIsForAWSResource": true - // } - // } - // } - // ] - // } - // - // +immutable - NodePoolManagementARN string `json:"nodePoolManagementARN"` - - // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:CreateVpcEndpoint", - // "ec2:DescribeVpcEndpoints", - // "ec2:ModifyVpcEndpoint", - // "ec2:DeleteVpcEndpoints", - // "ec2:CreateTags", - // "route53:ListHostedZones" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets", - // "route53:ListResourceRecordSets" - // ], - // "Resource": "arn:aws:route53:::%s" - // } - // ] - // } - // +immutable - ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"` -} - -// AWSServiceEndpoint stores the configuration for services to -// override existing defaults of AWS Services. -type AWSServiceEndpoint struct { - // Name is the name of the AWS service. - // This must be provided and cannot be empty. - Name string `json:"name"` - - // URL is fully qualified URI with scheme https, that overrides the default generated - // endpoint for a client. - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Pattern=`^https://` - URL string `json:"url"` -} - -type AzurePlatformSpec struct { - Credentials corev1.LocalObjectReference `json:"credentials"` - // The cloud environment identifier, valid values could be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33 - // +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud - // +kubebuilder:default="AzurePublicCloud" - Cloud string `json:"cloud,omitempty"` - Location string `json:"location"` - ResourceGroupName string `json:"resourceGroup"` - VnetID string `json:"vnetID"` - SubnetID string `json:"subnetID"` - SubscriptionID string `json:"subscriptionID"` - SecurityGroupID string `json:"securityGroupID"` -} - -// Release represents the metadata for an OCP release payload image. -type Release struct { - // Image is the image pullspec of an OCP release payload image. - // - // +kubebuilder:validation:Pattern=^(\w+\S+)$ - Image string `json:"image"` -} - -// ClusterAutoscaling specifies auto-scaling behavior that applies to all -// NodePools associated with a control plane. -type ClusterAutoscaling struct { - // MaxNodesTotal is the maximum allowable number of nodes across all NodePools - // for a HostedCluster. The autoscaler will not grow the cluster beyond this - // number. - // - // +kubebuilder:validation:Minimum=0 - MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"` - - // MaxPodGracePeriod is the maximum seconds to wait for graceful pod - // termination before scaling down a NodePool. The default is 600 seconds. - // - // +kubebuilder:validation:Minimum=0 - MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"` - - // MaxNodeProvisionTime is the maximum time to wait for node provisioning - // before considering the provisioning to be unsuccessful, expressed as a Go - // duration string. The default is 15 minutes. - // - // +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ - MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"` - - // PodPriorityThreshold enables users to schedule "best-effort" pods, which - // shouldn't trigger autoscaler actions, but only run when there are spare - // resources available. The default is -10. 
- //
- // See the following for more details:
- // https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption
- //
- // +optional
- PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"`
-}
-
-// EtcdManagementType is an enum specifying the strategy for managing the cluster's etcd instance
-// +kubebuilder:validation:Enum=Managed;Unmanaged
-type EtcdManagementType string
-
-const (
- // Managed means HyperShift should provision and operate the etcd cluster
- // automatically.
- Managed EtcdManagementType = "Managed"
-
- // Unmanaged means HyperShift will not provision or manage the etcd cluster,
- // and the user is responsible for doing so.
- Unmanaged EtcdManagementType = "Unmanaged"
-)
-
-// EtcdSpec specifies configuration for a control plane etcd cluster.
-type EtcdSpec struct {
- // ManagementType defines how the etcd cluster is managed.
- //
- // +unionDiscriminator
- // +immutable
- ManagementType EtcdManagementType `json:"managementType"`
-
- // Managed specifies the behavior of an etcd cluster managed by HyperShift.
- //
- // +optional
- // +immutable
- Managed *ManagedEtcdSpec `json:"managed,omitempty"`
-
- // Unmanaged specifies configuration which enables the control plane to
- // integrate with an externally managed etcd cluster.
- //
- // +optional
- // +immutable
- Unmanaged *UnmanagedEtcdSpec `json:"unmanaged,omitempty"`
-}
-
-// ManagedEtcdSpec specifies the behavior of an etcd cluster managed by
-// HyperShift.
-type ManagedEtcdSpec struct {
- // Storage specifies how etcd data is persisted.
- Storage ManagedEtcdStorageSpec `json:"storage"`
-}
-
-// ManagedEtcdStorageType is a storage type for an etcd cluster.
-//
-// +kubebuilder:validation:Enum=PersistentVolume
-type ManagedEtcdStorageType string
-
-const (
- // PersistentVolumeEtcdStorage uses PersistentVolumes for etcd storage.
- PersistentVolumeEtcdStorage ManagedEtcdStorageType = "PersistentVolume"
-)
-
-var (
- DefaultPersistentVolumeEtcdStorageSize resource.Quantity = resource.MustParse("8Gi")
-)
-
-// ManagedEtcdStorageSpec describes the storage configuration for etcd data.
-type ManagedEtcdStorageSpec struct {
- // Type is the kind of persistent storage implementation to use for etcd.
- //
- // +immutable
- // +unionDiscriminator
- Type ManagedEtcdStorageType `json:"type"`
-
- // PersistentVolume is the configuration for PersistentVolume etcd storage.
- // With this implementation, a PersistentVolume will be allocated for every
- // etcd member (either 1 or 3 depending on the HostedCluster control plane
- // availability configuration).
- //
- // +optional
- PersistentVolume *PersistentVolumeEtcdStorageSpec `json:"persistentVolume,omitempty"`
-
- // RestoreSnapshotURL allows an optional URL to be provided where
- // an etcd snapshot can be downloaded, for example a pre-signed URL
- // referencing a storage service.
- // This snapshot will be restored on initial startup, only when the etcd PV
- // is empty.
- //
- // +optional
- // +immutable
- // +kubebuilder:validation:XValidation:rule="self.size() <= 1", message="RestoreSnapshotURL shouldn't contain more than 1 entry"
- RestoreSnapshotURL []string `json:"restoreSnapshotURL,omitempty"`
-}
-
-// PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume
-// etcd storage.
-type PersistentVolumeEtcdStorageSpec struct {
- // StorageClassName is the StorageClass of the data volume for each etcd member.
- //
- // See https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1.
- //
- // +optional
- // +immutable
- StorageClassName *string `json:"storageClassName,omitempty"`
-
- // Size is the minimum size of the data volume for each etcd member.
- //
- // +optional
- // +kubebuilder:default="8Gi"
- // +immutable
- // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Etcd PV storage size is immutable"
- Size *resource.Quantity `json:"size,omitempty"`
-}
-
-// UnmanagedEtcdSpec specifies configuration which enables the control plane to
-// integrate with an externally managed etcd cluster.
-type UnmanagedEtcdSpec struct {
- // Endpoint is the full etcd cluster client endpoint URL. For example:
- //
- // https://etcd-client:2379
- //
- // If the URL uses an HTTPS scheme, the TLS field is required.
- //
- // +kubebuilder:validation:Pattern=`^https://`
- Endpoint string `json:"endpoint"`
-
- // TLS specifies TLS configuration for HTTPS etcd client endpoints.
- TLS EtcdTLSConfig `json:"tls"`
-}
-
-// EtcdTLSConfig specifies TLS configuration for HTTPS etcd client endpoints.
-type EtcdTLSConfig struct {
- // ClientSecret refers to a secret for client mTLS authentication with the etcd cluster. It
- // may have the following key/value pairs:
- //
- // etcd-client-ca.crt: Certificate Authority value
- // etcd-client.crt: Client certificate value
- // etcd-client.key: Client certificate key value
- ClientSecret corev1.LocalObjectReference `json:"clientSecret"`
-}
-
-// SecretEncryptionType defines the type of kube secret encryption being used.
-// +kubebuilder:validation:Enum=kms;aescbc
-type SecretEncryptionType string
-
-const (
- // KMS integrates with a cloud provider's key management service to do secret encryption
- KMS SecretEncryptionType = "kms"
- // AESCBC uses AES-CBC with PKCS#7 padding to do secret encryption
- AESCBC SecretEncryptionType = "aescbc"
-)
-
-// SecretEncryptionSpec contains metadata about the kubernetes secret encryption strategy being used for the
-// cluster when applicable.
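For orientation, a minimal sketch of the Managed etcd shape defined by the types above (not part of the vendored file; the hyperv1 alias and the resource import are assumptions, and the size simply mirrors the 8Gi default noted in the comments):

	// Illustrative only: managed etcd backed by a PersistentVolume per member.
	var etcdPVSize = resource.MustParse("8Gi")
	var etcd = hyperv1.EtcdSpec{
		ManagementType: hyperv1.Managed,
		Managed: &hyperv1.ManagedEtcdSpec{
			Storage: hyperv1.ManagedEtcdStorageSpec{
				Type:             hyperv1.PersistentVolumeEtcdStorage,
				PersistentVolume: &hyperv1.PersistentVolumeEtcdStorageSpec{Size: &etcdPVSize},
			},
		},
	}

The Unmanaged variant would instead set ManagementType to Unmanaged and point Unmanaged.Endpoint at the external etcd client URL, per the fields above.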
-type SecretEncryptionSpec struct { - // Type defines the type of kube secret encryption being used - // +unionDiscriminator - Type SecretEncryptionType `json:"type"` - - // KMS defines metadata about the kms secret encryption strategy - // +optional - KMS *KMSSpec `json:"kms,omitempty"` - - // AESCBC defines metadata about the AESCBC secret encryption strategy - // +optional - AESCBC *AESCBCSpec `json:"aescbc,omitempty"` -} - -// KMSProvider defines the supported KMS providers -// +kubebuilder:validation:Enum=IBMCloud;AWS;Azure -type KMSProvider string - -const ( - IBMCloud KMSProvider = "IBMCloud" - AWS KMSProvider = "AWS" - AZURE KMSProvider = "Azure" -) - -// KMSSpec defines metadata about the kms secret encryption strategy -type KMSSpec struct { - // Provider defines the KMS provider - // +unionDiscriminator - Provider KMSProvider `json:"provider"` - // IBMCloud defines metadata for the IBM Cloud KMS encryption strategy - // +optional - IBMCloud *IBMCloudKMSSpec `json:"ibmcloud,omitempty"` - // AWS defines metadata about the configuration of the AWS KMS Secret Encryption provider - // +optional - AWS *AWSKMSSpec `json:"aws,omitempty"` - // Azure defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault - // +optional - Azure *AzureKMSSpec `json:"azure,omitempty"` -} - -// AzureKMSSpec defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault -type AzureKMSSpec struct { - // ActiveKey defines the active key used to encrypt new secrets - // - // +kubebuilder:validation:Required - ActiveKey AzureKMSKey `json:"activeKey"` - // BackupKey defines the old key during the rotation process so previously created - // secrets can continue to be decrypted until they are all re-encrypted with the active key. - // +optional - BackupKey *AzureKMSKey `json:"backupKey,omitempty"` -} - -type AzureKMSKey struct { - // KeyVaultName is the name of the keyvault. 
Must match criteria specified at https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name
- // Your Microsoft Entra application used to create the cluster must be authorized to access this keyvault, e.g. using the AzureCLI:
- // `az keyvault set-policy -n $KEYVAULT_NAME --key-permissions decrypt encrypt --spn `
- KeyVaultName string `json:"keyVaultName"`
- // KeyName is the name of the keyvault key used for encrypt/decrypt
- KeyName string `json:"keyName"`
- // KeyVersion contains the version of the key to use
- KeyVersion string `json:"keyVersion"`
-}
-
-// IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy
-type IBMCloudKMSSpec struct {
- // Region is the IBM Cloud region
- Region string `json:"region"`
- // Auth defines metadata for how authentication is done with IBM Cloud KMS
- Auth IBMCloudKMSAuthSpec `json:"auth"`
- // KeyList defines the list of keys used for data encryption
- KeyList []IBMCloudKMSKeyEntry `json:"keyList"`
-}
-
-// IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key
-type IBMCloudKMSKeyEntry struct {
- // CRKID is the customer root key id
- CRKID string `json:"crkID"`
- // InstanceID is the id for the key protect instance
- InstanceID string `json:"instanceID"`
- // CorrelationID is an identifier used to track all api call usage from hypershift
- CorrelationID string `json:"correlationID"`
- // URL is the url to call key protect apis over
- // +kubebuilder:validation:Pattern=`^https://`
- URL string `json:"url"`
- // KeyVersion is a unique number associated with the key. The number increments whenever a new
- // key is enabled for data encryption.
- KeyVersion int `json:"keyVersion"`
-}
-
-// IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS
-type IBMCloudKMSAuthSpec struct {
- // Type defines the IBM Cloud KMS authentication strategy
- // +unionDiscriminator
- Type IBMCloudKMSAuthType `json:"type"`
- // Unmanaged defines the auth metadata the customer provides to interact with IBM Cloud KMS
- // +optional
- Unmanaged *IBMCloudKMSUnmanagedAuthSpec `json:"unmanaged,omitempty"`
- // Managed defines metadata around the service to service authentication strategy for the IBM Cloud
- // KMS system (all provider managed).
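The KMS encryption types above compose into SecretEncryptionSpec. A minimal sketch using the Azure provider follows (not part of the vendored file; the hyperv1 alias, vault name, key name, and key version are illustrative assumptions, while the field and constant names come from the definitions above):

	// Illustrative only: Azure Key Vault backed secret encryption for the control plane.
	var secretEncryption = &hyperv1.SecretEncryptionSpec{
		Type: hyperv1.KMS,
		KMS: &hyperv1.KMSSpec{
			Provider: hyperv1.AZURE,
			Azure: &hyperv1.AzureKMSSpec{
				ActiveKey: hyperv1.AzureKMSKey{
					KeyVaultName: "example-vault", // assumed name
					KeyName:      "example-key",   // assumed name
					KeyVersion:   "1",             // assumed version
				},
			},
		},
	}

During key rotation, BackupKey holds the previous key so existing secrets stay decryptable until re-encrypted, per the AzureKMSSpec comments above.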
- // +optional - Managed *IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"` -} - -// IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy -// +kubebuilder:validation:Enum=Managed;Unmanaged -type IBMCloudKMSAuthType string - -const ( - // IBMCloudKMSManagedAuth defines the KMS authentication strategy where the IKS/ROKS platform uses - // service to service auth to call IBM Cloud KMS APIs (no customer credentials requried) - IBMCloudKMSManagedAuth IBMCloudKMSAuthType = "Managed" - // IBMCloudKMSUnmanagedAuth defines the KMS authentication strategy where a customer supplies IBM Cloud - // authentication to interact with IBM Cloud KMS APIs - IBMCloudKMSUnmanagedAuth IBMCloudKMSAuthType = "Unmanaged" -) - -// IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS -type IBMCloudKMSUnmanagedAuthSpec struct { - // Credentials should reference a secret with a key field of IBMCloudIAMAPIKeySecretKey that contains a apikey to - // call IBM Cloud KMS APIs - Credentials corev1.LocalObjectReference `json:"credentials"` -} - -// IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud -// KMS system (all provider managed). -type IBMCloudKMSManagedAuthSpec struct { -} - -// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider -type AWSKMSSpec struct { - // Region contains the AWS region - Region string `json:"region"` - // ActiveKey defines the active key used to encrypt new secrets - ActiveKey AWSKMSKeyEntry `json:"activeKey"` - // BackupKey defines the old key during the rotation process so previously created - // secrets can continue to be decrypted until they are all re-encrypted with the active key. - // +optional - BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"` - // Auth defines metadata about the management of credentials used to interact with AWS KMS - Auth AWSKMSAuthSpec `json:"auth"` -} - -// AWSKMSAuthSpec defines metadata about the management of credentials used to interact with AWS KMS -type AWSKMSAuthSpec struct { - // Deprecated - // This field is deprecated and will be removed in a future release. Use AWSKMSRoleARN instead. - // Credentials contains the name of the secret that holds the aws credentials that can be used - // to make the necessary KMS calls. It should at key AWSCredentialsFileSecretKey contain the - // aws credentials file that can be used to configure AWS SDKs - Credentials corev1.LocalObjectReference `json:"credentials"` - - // The referenced role must have a trust relationship that allows it to be assumed via web identity. - // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. - // Example: - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Principal": { - // "Federated": "{{ .ProviderARN }}" - // }, - // "Action": "sts:AssumeRoleWithWebIdentity", - // "Condition": { - // "StringEquals": { - // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} - // } - // } - // } - // ] - // } - // - // AWSKMSARN is an ARN value referencing a role appropriate for managing the auth via the AWS KMS key. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "kms:Encrypt", - // "kms:Decrypt", - // "kms:ReEncrypt*", - // "kms:GenerateDataKey*", - // "kms:DescribeKey" - // ], - // "Resource": %q - // } - // ] - // } - AWSKMSRoleARN string `json:"awsKms"` -} - -// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS -type AWSKMSKeyEntry struct { - // ARN is the Amazon Resource Name for the encryption key - // +kubebuilder:validation:Pattern=`^arn:` - ARN string `json:"arn"` -} - -// AESCBCSpec defines metadata about the AESCBC secret encryption strategy -type AESCBCSpec struct { - // ActiveKey defines the active key used to encrypt new secrets - ActiveKey corev1.LocalObjectReference `json:"activeKey"` - // BackupKey defines the old key during the rotation process so previously created - // secrets can continue to be decrypted until they are all re-encrypted with the active key. - // +optional - BackupKey *corev1.LocalObjectReference `json:"backupKey,omitempty"` -} - -// HostedClusterStatus is the latest observed status of a HostedCluster. -type HostedClusterStatus struct { - // Version is the status of the release version applied to the - // HostedCluster. - // +optional - Version *ClusterVersionStatus `json:"version,omitempty"` - - // KubeConfig is a reference to the secret containing the default kubeconfig - // for the cluster. - // +optional - KubeConfig *corev1.LocalObjectReference `json:"kubeconfig,omitempty"` - - // KubeadminPassword is a reference to the secret that contains the initial - // kubeadmin user password for the guest cluster. - // +optional - KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"` - - // IgnitionEndpoint is the endpoint injected in the ign config userdata. - // It exposes the config for instances to become kubernetes nodes. - // +optional - IgnitionEndpoint string `json:"ignitionEndpoint,omitempty"` - - // ControlPlaneEndpoint contains the endpoint information by which - // external clients can access the control plane. This is populated - // after the infrastructure is ready. - // +kubebuilder:validation:Optional - ControlPlaneEndpoint APIEndpoint `json:"controlPlaneEndpoint,omitempty"` - - // OAuthCallbackURLTemplate contains a template for the URL to use as a callback - // for identity providers. The [identity-provider-name] placeholder must be replaced - // with the name of an identity provider defined on the HostedCluster. - // This is populated after the infrastructure is ready. - // +kubebuilder:validation:Optional - OAuthCallbackURLTemplate string `json:"oauthCallbackURLTemplate,omitempty"` - - // Conditions represents the latest available observations of a control - // plane's current state. - // +optional - // +listType=map - // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []metav1.Condition `json:"conditions,omitempty"` - - // Platform contains platform-specific status of the HostedCluster - // +optional - Platform *PlatformStatus `json:"platform,omitempty"` -} - -// PlatformStatus contains platform-specific status -type PlatformStatus struct { - // +optional - AWS *AWSPlatformStatus `json:"aws,omitempty"` -} - -// AWSPlatformStatus contains status specific to the AWS platform -type AWSPlatformStatus struct { - // DefaultWorkerSecurityGroupID is the ID of a security group created by - // the control plane operator. 
It is always added to worker machines in - // addition to any security groups specified in the NodePool. - // +optional - DefaultWorkerSecurityGroupID string `json:"defaultWorkerSecurityGroupID,omitempty"` -} - -// ClusterVersionStatus reports the status of the cluster versioning, -// including any upgrades that are in progress. The current field will -// be set to whichever version the cluster is reconciling to, and the -// conditions array will report whether the update succeeded, is in -// progress, or is failing. -// +k8s:deepcopy-gen=true -type ClusterVersionStatus struct { - // desired is the version that the cluster is reconciling towards. - // If the cluster is not yet fully initialized desired will be set - // with the information available, which may be an image or a tag. - Desired configv1.Release `json:"desired"` - - // history contains a list of the most recent versions applied to the cluster. - // This value may be empty during cluster startup, and then will be updated - // when a new update is being applied. The newest update is first in the - // list and it is ordered by recency. Updates in the history have state - // Completed if the rollout completed - if an update was failing or halfway - // applied the state will be Partial. Only a limited amount of update history - // is preserved. - // - // +optional - History []configv1.UpdateHistory `json:"history,omitempty"` - - // observedGeneration reports which version of the spec is being synced. - // If this value is not equal to metadata.generation, then the desired - // and conditions fields may represent a previous version. - ObservedGeneration int64 `json:"observedGeneration"` - - // availableUpdates contains updates recommended for this - // cluster. Updates which appear in conditionalUpdates but not in - // availableUpdates may expose this cluster to known issues. This list - // may be empty if no updates are recommended, if the update service - // is unavailable, or if an invalid channel has been specified. - // +nullable - // +kubebuilder:validation:Required - // +required - AvailableUpdates []configv1.Release `json:"availableUpdates"` - - // conditionalUpdates contains the list of updates that may be - // recommended for this cluster if it meets specific required - // conditions. Consumers interested in the set of updates that are - // actually recommended for this cluster should use - // availableUpdates. This list may be empty if no updates are - // recommended, if the update service is unavailable, or if an empty - // or invalid channel has been specified. - // +listType=atomic - // +optional - ConditionalUpdates []configv1.ConditionalUpdate `json:"conditionalUpdates,omitempty"` -} - -// ClusterConfiguration specifies configuration for individual OCP components in the -// cluster, represented as embedded resources that correspond to the openshift -// configuration API. -// -// The API for individual configuration items is at: -// https://docs.openshift.com/container-platform/4.7/rest_api/config_apis/config-apis-index.html -type ClusterConfiguration struct { - // SecretRefs holds references to any secrets referenced by configuration - // entries. Entries can reference the secrets using local object references. 
- // - // Deprecated - // This field is deprecated and will be removed in a future release - // - // +kubebuilder:validation:Optional - // +optional - SecretRefs []corev1.LocalObjectReference `json:"secretRefs,omitempty"` - - // ConfigMapRefs holds references to any configmaps referenced by - // configuration entries. Entries can reference the configmaps using local - // object references. - // - // Deprecated - // This field is deprecated and will be removed in a future release - // - // +kubebuilder:validation:Optional - // +optional - ConfigMapRefs []corev1.LocalObjectReference `json:"configMapRefs,omitempty"` - - // Items embeds the serialized configuration resources. - // - // Deprecated - // This field is deprecated and will be removed in a future release - // - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Optional - // +optional - Items []runtime.RawExtension `json:"items,omitempty"` - - // APIServer holds configuration (like serving certificates, client CA and CORS domains) - // shared by all API servers in the system, among them especially kube-apiserver - // and openshift-apiserver. - // +optional - APIServer *configv1.APIServerSpec `json:"apiServer,omitempty"` - - // Authentication specifies cluster-wide settings for authentication (like OAuth and - // webhook token authenticators). - // +optional - Authentication *configv1.AuthenticationSpec `json:"authentication,omitempty"` - - // FeatureGate holds cluster-wide information about feature gates. - // +optional - FeatureGate *configv1.FeatureGateSpec `json:"featureGate,omitempty"` - - // Image governs policies related to imagestream imports and runtime configuration - // for external registries. It allows cluster admins to configure which registries - // OpenShift is allowed to import images from, extra CA trust bundles for external - // registries, and policies to block or allow registry hostnames. - // When exposing OpenShift's image registry to the public, this also lets cluster - // admins specify the external hostname. - // +optional - Image *configv1.ImageSpec `json:"image,omitempty"` - - // Ingress holds cluster-wide information about ingress, including the default ingress domain - // used for routes. - // +optional - Ingress *configv1.IngressSpec `json:"ingress,omitempty"` - - // Network holds cluster-wide information about the network. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. - // Please view network.spec for an explanation on what applies when configuring this resource. - // TODO (csrwng): Add validation here to exclude changes that conflict with networking settings in the HostedCluster.Spec.Networking field. - // +optional - Network *configv1.NetworkSpec `json:"network,omitempty"` - - // OAuth holds cluster-wide information about OAuth. - // It is used to configure the integrated OAuth server. - // This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. - // +optional - OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` - - // OperatorHub specifies the configuration for the Operator Lifecycle Manager in the HostedCluster. - // The OperatorHub configuration is continuously reconciled when catalog placement is set to management; otherwise it is only applied on cluster creation and the controller does not reconcile it afterwards.
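To make the embedded configuration concrete, here is a minimal sketch (illustrative only, not part of the vendored file) that sets a cluster-wide egress proxy through the ClusterConfiguration type defined here; the proxy endpoints and the hyperv1/configv1 import aliases are assumptions.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	// Cluster-wide proxy settings carried inside ClusterConfiguration;
	// the endpoints below are placeholders.
	cfg := hyperv1.ClusterConfiguration{
		Proxy: &configv1.ProxySpec{
			HTTPProxy:  "http://proxy.example.com:3128",
			HTTPSProxy: "http://proxy.example.com:3128",
			NoProxy:    "localhost,127.0.0.1",
		},
	}
	fmt.Printf("%+v\n", cfg)
}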
- // - // +optional - OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"` - - // Scheduler holds cluster-wide config information to run the Kubernetes Scheduler - // and influence its placement decisions. The canonical name for this config is `cluster`. - // +optional - Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"` - - // Proxy holds cluster-wide information on how to configure default proxies for the cluster. - // +optional - Proxy *configv1.ProxySpec `json:"proxy,omitempty"` -} - -// +genclient - -// HostedCluster is the primary representation of a HyperShift cluster and encapsulates -// the control plane and common data plane configuration. Creating a HostedCluster -// results in a fully functional OpenShift control plane with no attached nodes. -// To support workloads (e.g. pods), a HostedCluster may have one or more associated -// NodePool resources. -// -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=hostedclusters,shortName=hc;hcs,scope=Namespaced -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1alpha1 is a deprecated version for HostedCluster" -// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version.history[?(@.state==\"Completed\")].version",description="Version" -// +kubebuilder:printcolumn:name="KubeConfig",type="string",JSONPath=".status.kubeconfig.name",description="KubeConfig Secret" -// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.version.history[?(@.state!=\"\")].state",description="Progress" -// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available" -// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing" -// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message" -type HostedCluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec is the desired behavior of the HostedCluster. - Spec HostedClusterSpec `json:"spec,omitempty"` - - // Status is the latest observed status of the HostedCluster. 
- Status HostedClusterStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true -// HostedClusterList contains a list of HostedCluster -type HostedClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []HostedCluster `json:"items"` -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/nodepool_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/nodepool_types.go deleted file mode 100644 index c5caa68ab..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/nodepool_types.go +++ /dev/null @@ -1,1034 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/openshift/hypershift/api/ibmcapi" -) - -const ( - NodePoolValidGeneratedPayloadConditionType = "ValidGeneratedPayload" - NodePoolValidPlatformImageType = "ValidPlatformImage" - NodePoolValidHostedClusterConditionType = "ValidHostedCluster" - NodePoolValidReleaseImageConditionType = "ValidReleaseImage" - NodePoolValidMachineConfigConditionType = "ValidMachineConfig" - NodePoolValidTuningConfigConditionType = "ValidTuningConfig" - NodePoolUpdateManagementEnabledConditionType = "UpdateManagementEnabled" - NodePoolAutoscalingEnabledConditionType = "AutoscalingEnabled" - NodePoolReadyConditionType = "Ready" - NodePoolReconciliationActiveConditionType = "ReconciliationActive" - NodePoolAutorepairEnabledConditionType = "AutorepairEnabled" - NodePoolUpdatingVersionConditionType = "UpdatingVersion" - NodePoolUpdatingConfigConditionType = "UpdatingConfig" - NodePoolAsExpectedConditionReason = "AsExpected" - NodePoolValidationFailedConditionReason = "ValidationFailed" - NodePoolInplaceUpgradeFailedConditionReason = "InplaceUpgradeFailed" - NodePoolNotFoundReason = "NotFound" - NodePoolFailedToGetReason = "FailedToGet" - NodePoolValidArchPlatform = "ValidArchPlatform" - // NodePoolLabel is used to label Nodes. - NodePoolLabel = "hypershift.openshift.io/nodePool" -) - -// The following are reasons for the IgnitionEndpointAvailable condition. -const ( - IgnitionEndpointMissingReason string = "IgnitionEndpointMissing" - IgnitionCACertMissingReason string = "IgnitionCACertMissing" -) - -const ( - // IgnitionServerTokenExpirationTimestampAnnotation holds the time that a ignition token expires and should be - // removed from the cluster. - IgnitionServerTokenExpirationTimestampAnnotation = "hypershift.openshift.io/ignition-token-expiration-timestamp" -) - -const ( - ArchitectureAMD64 = "amd64" - ArchitectureS390X = "s390x" - ArchitecturePPC64LE = "ppc64le" - ArchitectureARM64 = "arm64" -) - -var ( - // ArchAliases contains the RHCOS release metadata aliases for the different architectures supported as API input. - ArchAliases = map[string]string{ - ArchitectureAMD64: "x86_64", - ArchitectureARM64: "aarch64", - } -) - -func init() { - SchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NodePool{}, - &NodePoolList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil - }) -} - -// +genclient - -// NodePool is a scalable set of worker nodes attached to a HostedCluster. -// NodePool machine architectures are uniform within a given pool, and are -// independent of the control plane’s underlying machine architecture. 
-// -// +kubebuilder:resource:path=nodepools,shortName=np;nps,scope=Namespaced -// +kubebuilder:deprecatedversion:warning="v1alpha1 is a deprecated version for NodePool" -// +kubebuilder:subresource:status -// +kubebuilder:object:root=true -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas -// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.clusterName",description="Cluster" -// +kubebuilder:printcolumn:name="Desired Nodes",type="integer",JSONPath=".spec.replicas",description="Desired Nodes" -// +kubebuilder:printcolumn:name="Current Nodes",type="integer",JSONPath=".status.replicas",description="Available Nodes" -// +kubebuilder:printcolumn:name="Autoscaling",type="string",JSONPath=".status.conditions[?(@.type==\"AutoscalingEnabled\")].status",description="Autoscaling Enabled" -// +kubebuilder:printcolumn:name="Autorepair",type="string",JSONPath=".status.conditions[?(@.type==\"AutorepairEnabled\")].status",description="Node Autorepair Enabled" -// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Current version" -// +kubebuilder:printcolumn:name="UpdatingVersion",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingVersion\")].status",description="UpdatingVersion in progress" -// +kubebuilder:printcolumn:name="UpdatingConfig",type="string",JSONPath=".status.conditions[?(@.type==\"UpdatingConfig\")].status",description="UpdatingConfig in progress" -// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Message" -type NodePool struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec is the desired behavior of the NodePool. - Spec NodePoolSpec `json:"spec,omitempty"` - - // Status is the latest observed status of the NodePool. - Status NodePoolStatus `json:"status,omitempty"` -} - -// NodePoolSpec is the desired behavior of a NodePool. -type NodePoolSpec struct { - // ClusterName is the name of the HostedCluster this NodePool belongs to. - // - // TODO(dan): Should this be a LocalObjectReference? - // - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="ClusterName is immutable" - ClusterName string `json:"clusterName"` - - // Release specifies the OCP release used for the NodePool. This informs the - // ignition configuration for machines, as well as other platform specific - // machine properties (e.g. an AMI on the AWS platform). - Release Release `json:"release"` - - // Platform specifies the underlying infrastructure provider for the NodePool - // and is used to configure platform specific behavior. - // - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Platform is immutable" - Platform NodePoolPlatform `json:"platform"` - - // Deprecated: Use Replicas instead. NodeCount will be dropped in the next - // api release. - // - // +optional - NodeCount *int32 `json:"nodeCount,omitempty"` - - // Replicas is the desired number of nodes the pool should maintain. If - // unset, the default value is 0. - // - // +optional - Replicas *int32 `json:"replicas,omitempty"` - - // Management specifies behavior for managing nodes in the pool, such as - // upgrade strategies and auto-repair behaviors. - Management NodePoolManagement `json:"management"` - - // Autoscaling specifies auto-scaling behavior for the NodePool. 
- // - // +optional - AutoScaling *NodePoolAutoScaling `json:"autoScaling,omitempty"` - - // Config is a list of references to ConfigMaps containing serialized - // MachineConfig resources to be injected into the ignition configurations of - // nodes in the NodePool. The MachineConfig API schema is defined here: - // - // https://github.com/openshift/machine-config-operator/blob/18963e4f8fe66e8c513ca4b131620760a414997f/pkg/apis/machineconfiguration.openshift.io/v1/types.go#L185 - // - // Each ConfigMap must have a single key named "config" whose value is the - // JSON or YAML of a serialized MachineConfig. - // +kubebuilder:validation:Optional - Config []corev1.LocalObjectReference `json:"config,omitempty"` - - // NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. - // The default value is 0, meaning that the node can be drained without any time limitations. - // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` - // TODO (alberto): Today changing this field will trigger a recreate rolling update, which kind of defeats - // the purpose of the change. In future we plan to propagate this field in-place. - // https://github.com/kubernetes-sigs/cluster-api/issues/5880 - // +optional - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` - - // NodeVolumeDetachTimeout is the maximum amount of time that the controller will spend on detaching volume from a node. - // After the timeout, volumes that haven't been detached are skipped. - // +optional - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` - - // NodeLabels propagates a list of labels to Nodes, only once on creation. - // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set - // +optional - NodeLabels map[string]string `json:"nodeLabels,omitempty"` - - // Taints if specified, propagates a list of taints to Nodes, only once on creation. - // +optional - Taints []Taint `json:"taints,omitempty"` - - // PausedUntil is a field that can be used to pause reconciliation on a resource. - // Either a date can be provided in RFC3339 format or a boolean. If a date is - // provided: reconciliation is paused on the resource until that date. If the boolean true is - // provided: reconciliation is paused on the resource until the field is removed. - // +optional - PausedUntil *string `json:"pausedUntil,omitempty"` - - // TuningConfig is a list of references to ConfigMaps containing serialized - // Tuned resources to define the tuning configuration to be applied to - // nodes in the NodePool. The Tuned API is defined here: - // - // https://github.com/openshift/cluster-node-tuning-operator/blob/2c76314fb3cc8f12aef4a0dcd67ddc3677d5b54f/pkg/apis/tuned/v1/tuned_types.go - // - // Each ConfigMap must have a single key named "tuned" whose value is the - // JSON or YAML of a serialized Tuned. 
- // +kubebuilder:validation:Optional - TuningConfig []corev1.LocalObjectReference `json:"tuningConfig,omitempty"` - - // Arch is the preferred processor architecture for the NodePool (currently only supported on AWS) - // NOTE: This is set as optional to prevent validation from failing due to a limitation on client side validation with open API machinery: - // https://github.com/kubernetes/kubernetes/issues/108768#issuecomment-1253912215 - // TODO Add ppc64le and s390x to enum validation once the architectures are supported - // - // +kubebuilder:default:=amd64 - // +kubebuilder:validation:Enum=arm64;amd64 - // +optional - Arch string `json:"arch,omitempty"` -} - -// NodePoolStatus is the latest observed status of a NodePool. -type NodePoolStatus struct { - // Replicas is the latest observed number of nodes in the pool. - // - // +optional - Replicas int32 `json:"replicas"` - - // Version is the semantic version of the latest applied release specified by - // the NodePool. - // - // +kubebuilder:validation:Optional - Version string `json:"version,omitempty"` - - // Platform holds the platform-specific statuses - Platform *NodePoolPlatformStatus `json:"platform,omitempty"` - - // Conditions represents the latest available observations of the node pool's - // current state. - // +optional - Conditions []NodePoolCondition `json:"conditions,omitempty"` -} - -// NodePoolList contains a list of NodePools. -// -// +kubebuilder:object:root=true -type NodePoolList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []NodePool `json:"items"` -} - -// UpgradeType is a type of high-level upgrade behavior for nodes in a NodePool. -type UpgradeType string - -const ( - // UpgradeTypeReplace is a strategy which replaces nodes using surge node - // capacity. - UpgradeTypeReplace = UpgradeType("Replace") - - // UpgradeTypeInPlace is a strategy which replaces nodes in-place with no - // additional node capacity requirements. - UpgradeTypeInPlace = UpgradeType("InPlace") -) - -// UpgradeStrategy is a specific strategy for upgrading nodes in a NodePool. -type UpgradeStrategy string - -const ( - // UpgradeStrategyRollingUpdate means use a rolling update for nodes. - UpgradeStrategyRollingUpdate = UpgradeStrategy("RollingUpdate") - - // UpgradeStrategyOnDelete replaces old nodes when the deletion of the - // associated node instances is completed. - UpgradeStrategyOnDelete = UpgradeStrategy("OnDelete") -) - -// ReplaceUpgrade specifies upgrade behavior that replaces existing nodes -// according to a given strategy. -type ReplaceUpgrade struct { - // Strategy is the node replacement strategy for nodes in the pool. - // - // +kubebuilder:validation:Optional - // +kubebuilder:validation:Enum=RollingUpdate;OnDelete - Strategy UpgradeStrategy `json:"strategy"` - - // RollingUpdate specifies a rolling update strategy which upgrades nodes by - // creating new nodes and deleting the old ones. - // - // +kubebuilder:validation:Optional - RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"` -} - -// RollingUpdate specifies a rolling update strategy which upgrades nodes by -// creating new nodes and deleting the old ones. -type RollingUpdate struct { - // MaxUnavailable is the maximum number of nodes that can be unavailable - // during the update. - // - // Value can be an absolute number (ex: 5) or a percentage of desired nodes - // (ex: 10%). - // - // Absolute number is calculated from percentage by rounding down. - // - // This can not be 0 if MaxSurge is 0.
- // - // Defaults to 0. - // - // Example: when this is set to 30%, old nodes can be deleted down to 70% of - // desired nodes immediately when the rolling update starts. Once new nodes - // are ready, more old nodes be deleted, followed by provisioning new nodes, - // ensuring that the total number of nodes available at all times during the - // update is at least 70% of desired nodes. - // - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // MaxSurge is the maximum number of nodes that can be provisioned above the - // desired number of nodes. - // - // Value can be an absolute number (ex: 5) or a percentage of desired nodes - // (ex: 10%). - // - // Absolute number is calculated from percentage by rounding up. - // - // This can not be 0 if MaxUnavailable is 0. - // - // Defaults to 1. - // - // Example: when this is set to 30%, new nodes can be provisioned immediately - // when the rolling update starts, such that the total number of old and new - // nodes do not exceed 130% of desired nodes. Once old nodes have been - // deleted, new nodes can be provisioned, ensuring that total number of nodes - // running at any time during the update is at most 130% of desired nodes. - // - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` -} - -// InPlaceUpgrade specifies an upgrade strategy which upgrades nodes in-place -// without any new nodes being created or any old nodes being deleted. -type InPlaceUpgrade struct { - // MaxUnavailable is the maximum number of nodes that can be unavailable - // during the update. - // - // Value can be an absolute number (ex: 5) or a percentage of desired nodes - // (ex: 10%). - // - // Absolute number is calculated from percentage by rounding down. - // - // Defaults to 1. - // - // Example: when this is set to 30%, a max of 30% of the nodes can be made - // unschedulable/unavailable immediately when the update starts. Once a set - // of nodes is updated, more nodes can be made unschedulable for update, - // ensuring that the total number of nodes schedulable at all times during - // the update is at least 70% of desired nodes. - // - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` -} - -// NodePoolManagement specifies behavior for managing nodes in a NodePool, such -// as upgrade strategies and auto-repair behaviors. -type NodePoolManagement struct { - // UpgradeType specifies the type of strategy for handling upgrades. - // - // +kubebuilder:validation:Enum=Replace;InPlace - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="UpgradeType is immutable" - UpgradeType UpgradeType `json:"upgradeType"` - - // Replace is the configuration for rolling upgrades. - // - // +kubebuilder:validation:Optional - // +kubebuilder:default={strategy: "RollingUpdate", rollingUpdate: {maxSurge: 1, maxUnavailable: 0 }} - Replace *ReplaceUpgrade `json:"replace,omitempty"` - - // InPlace is the configuration for in-place upgrades. - // - // +kubebuilder:validation:Optional - InPlace *InPlaceUpgrade `json:"inPlace,omitempty"` - - // AutoRepair specifies whether health checks should be enabled for machines - // in the NodePool. The default is false. - // - // +optional - // +kubebuilder:default=false - AutoRepair bool `json:"autoRepair"` -} - -// NodePoolAutoScaling specifies auto-scaling behavior for a NodePool. -type NodePoolAutoScaling struct { - // Min is the minimum number of nodes to maintain in the pool. Must be >= 1. 
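A brief sketch (not part of the vendored file) of how the upgrade-management types above fit together: a Replace-type NodePool that rolls nodes with one surge node and zero unavailable nodes. The hyperv1 import alias is an assumption; everything else uses types and constants defined above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	maxSurge := intstr.FromInt(1)
	maxUnavailable := intstr.FromInt(0)

	// Replace nodes via a rolling update: surge one extra node and never
	// drop below the desired count; also enable machine auto-repair.
	mgmt := hyperv1.NodePoolManagement{
		UpgradeType: hyperv1.UpgradeTypeReplace,
		Replace: &hyperv1.ReplaceUpgrade{
			Strategy: hyperv1.UpgradeStrategyRollingUpdate,
			RollingUpdate: &hyperv1.RollingUpdate{
				MaxSurge:       &maxSurge,
				MaxUnavailable: &maxUnavailable,
			},
		},
		AutoRepair: true,
	}
	fmt.Printf("%+v\n", mgmt)
}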
- // - // +kubebuilder:validation:Minimum=1 - Min int32 `json:"min"` - - // Max is the maximum number of nodes allowed in the pool. Must be >= 1. - // - // +kubebuilder:validation:Minimum=1 - Max int32 `json:"max"` -} - -// NodePoolPlatform specifies the underlying infrastructure provider for the -// NodePool and is used to configure platform specific behavior. -type NodePoolPlatform struct { - // Type specifies the platform name. - // - // +unionDiscriminator - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable" - Type PlatformType `json:"type"` - - // AWS specifies the configuration used when operating on AWS. - // - // +optional - AWS *AWSNodePoolPlatform `json:"aws,omitempty"` - - // IBMCloud defines IBMCloud specific settings for components - IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - - // Kubevirt specifies the configuration used when operating on KubeVirt platform. - // - // +optional - Kubevirt *KubevirtNodePoolPlatform `json:"kubevirt,omitempty"` - - // Agent specifies the configuration used when using Agent platform. - // - // +optional - Agent *AgentNodePoolPlatform `json:"agent,omitempty"` - - Azure *AzureNodePoolPlatform `json:"azure,omitempty"` - - // PowerVS specifies the configuration used when using IBMCloud PowerVS platform. - // - // +optional - PowerVS *PowerVSNodePoolPlatform `json:"powervs,omitempty"` -} - -// PowerVSNodePoolProcType defines processor type to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolProcType string - -func (p *PowerVSNodePoolProcType) String() string { - return string(*p) -} - -func (p *PowerVSNodePoolProcType) Set(s string) error { - switch s { - case string(PowerVSNodePoolSharedProcType), string(PowerVSNodePoolCappedProcType), string(PowerVSNodePoolDedicatedProcType): - *p = PowerVSNodePoolProcType(s) - return nil - default: - return fmt.Errorf("unknown processor type used %s", s) - } -} - -func (p *PowerVSNodePoolProcType) Type() string { - return "PowerVSNodePoolProcType" -} - -const ( - // PowerVSNodePoolDedicatedProcType defines dedicated processor type - PowerVSNodePoolDedicatedProcType = PowerVSNodePoolProcType("dedicated") - - // PowerVSNodePoolSharedProcType defines shared processor type - PowerVSNodePoolSharedProcType = PowerVSNodePoolProcType("shared") - - // PowerVSNodePoolCappedProcType defines capped processor type - PowerVSNodePoolCappedProcType = PowerVSNodePoolProcType("capped") -) - -func (p *PowerVSNodePoolProcType) CastToCAPIPowerVSProcessorType() ibmcapi.PowerVSProcessorType { - switch *p { - case PowerVSNodePoolDedicatedProcType: - return ibmcapi.PowerVSProcessorTypeDedicated - case PowerVSNodePoolCappedProcType: - return ibmcapi.PowerVSProcessorTypeCapped - default: - return ibmcapi.PowerVSProcessorTypeShared - } -} - -// PowerVSNodePoolStorageType defines storage type to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolStorageType string - -// PowerVSNodePoolImageDeletePolicy defines image delete policy to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolImageDeletePolicy string - -// PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating -// on IBMCloud PowerVS platform. -type PowerVSNodePoolPlatform struct { - // SystemType is the System type used to host the instance. - // systemType determines the number of cores and memory that is available. - // Few of the supported SystemTypes are s922,e880,e980. - // e880 systemType available only in Dallas Datacenters. 
- // e980 systemType available in Datacenters except Dallas and Washington. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The current default is s922 which is generally available. - // - // +optional - // +kubebuilder:default=s922 - SystemType string `json:"systemType,omitempty"` - - // ProcessorType is the VM instance processor type. - // It must be set to one of the following values: Dedicated, Capped or Shared. - // - // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. - // Shared: Shared among other clients. - // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. - // - // if the processorType is selected as Dedicated, then Processors value cannot be fractional. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The current default is shared. - // - // +kubebuilder:default=shared - // +kubebuilder:validation:Enum=dedicated;shared;capped - // +optional - ProcessorType PowerVSNodePoolProcType `json:"processorType,omitempty"` - - // Processors is the number of virtual processors in a virtual machine. - // when the processorType is selected as Dedicated the processors value cannot be fractional. - // maximum value for the Processors depends on the selected SystemType. - // when SystemType is set to e880 or e980 maximum Processors value is 143. - // when SystemType is set to s922 maximum Processors value is 15. - // minimum value for Processors depends on the selected ProcessorType. - // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. - // when ProcessorType is set as Dedicated, The minimum processors is 1. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The default is set based on the selected ProcessorType. - // when ProcessorType selected as Dedicated, the default is set to 1. - // when ProcessorType selected as Shared or Capped, the default is set to 0.5. - // - // +optional - // +kubebuilder:default="0.5" - Processors intstr.IntOrString `json:"processors,omitempty"` - - // MemoryGiB is the size of a virtual machine's memory, in GiB. - // maximum value for the MemoryGiB depends on the selected SystemType. - // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. - // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. - // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. - // The minimum memory is 32 GiB. - // - // When omitted, this means the user has no opinion and the platform is left to choose a reasonable - // default. The current default is 32. - // - // +optional - // +kubebuilder:default=32 - MemoryGiB int32 `json:"memoryGiB,omitempty"` - - // Image used for deploying the nodes. If unspecified, the default - // is chosen based on the NodePool release payload image. - // - // +optional - Image *PowerVSResourceReference `json:"image,omitempty"` - - // StorageType for the image and nodes, this will be ignored if Image is specified. - // The storage tiers in PowerVS are based on I/O operations per second (IOPS). - // It means that the performance of your storage volumes is limited to the maximum number of IOPS based on volume size and storage tier. 
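As a rough illustration (not part of the vendored file) of the PowerVS fields described above, a small worker profile on s922 hosts with shared processors; the hyperv1 import alias is an assumption, and the values mirror the documented defaults.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	// s922 hosts, shared processors, half a virtual processor and the
	// 32 GiB minimum memory, matching the documented defaults.
	powervs := hyperv1.PowerVSNodePoolPlatform{
		SystemType:    "s922",
		ProcessorType: hyperv1.PowerVSNodePoolSharedProcType,
		Processors:    intstr.FromString("0.5"),
		MemoryGiB:     32,
	}
	fmt.Printf("%+v\n", powervs)
}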
- // Although the exact numbers might change over time, the Tier 3 storage is currently set to 3 IOPS/GB, and the Tier 1 storage is currently set to 10 IOPS/GB. - // - // The default is tier1 - // - // +kubebuilder:default=tier1 - // +kubebuilder:validation:Enum=tier1;tier3 - // +optional - StorageType PowerVSNodePoolStorageType `json:"storageType,omitempty"` - - // ImageDeletePolicy is the policy for image deletion. - // - // delete: delete the image from the infrastructure. - // retain: delete the image from OpenShift but retain it in the infrastructure. - // - // The default is delete - // - // +kubebuilder:default=delete - // +kubebuilder:validation:Enum=delete;retain - // +optional - ImageDeletePolicy PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"` -} - -type QoSClass string - -const ( - QoSClassBurstable QoSClass = "Burstable" - QoSClassGuaranteed QoSClass = "Guaranteed" -) - -// KubevirtCompute contains values associated with the virtual compute hardware requested for the VM. -type KubevirtCompute struct { - // Memory represents how much guest memory the VM should have - // - // +optional - // +kubebuilder:default="8Gi" - Memory *resource.Quantity `json:"memory"` - - // Cores represents how many cores the guest VM should have - // - // +optional - // +kubebuilder:default=2 - Cores *uint32 `json:"cores"` - - // QosClass, if set to "Guaranteed", requests the scheduler to place the VirtualMachineInstance on a node with - // memory and CPU limits equal to the requested values, to set the VMI as a Guaranteed QoS Class; - // See here for more details: - // https://kubevirt.io/user-guide/operations/node_overcommit/#requesting-the-right-qos-class-for-virtualmachineinstances - // - // +optional - // +kubebuilder:validation:Enum=Burstable;Guaranteed - // +kubebuilder:default=Burstable - QosClass *QoSClass `json:"qosClass,omitempty"` -} - -// +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany;ReadOnly;ReadWriteOncePod -type PersistentVolumeAccessMode corev1.PersistentVolumeAccessMode - -// KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM. -type KubevirtPersistentVolume struct { - // Size is the size of the persistent storage volume - // - // +optional - // +kubebuilder:default="32Gi" - Size *resource.Quantity `json:"size"` - // StorageClass is the storageClass used for the underlying PVC that hosts the volume - // - // +optional - StorageClass *string `json:"storageClass,omitempty"` - // AccessModes is an array that contains the desired Access Modes the root volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes - // - // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` - // VolumeMode defines what type of volume is required by the claim. - // Value of Filesystem is implied when not included in claim spec.
- // +optional - // +kubebuilder:validation:Enum=Filesystem;Block - VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` -} - -// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider -type KubevirtCachingStrategyType string - -const ( - // KubevirtCachingStrategyNone means that hypershift will not cache the boot image - KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None" - - // KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using - // a QCOW boot image, and is ignored when using a container image - KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC" -) - -// KubevirtCachingStrategy defines the boot image caching strategy -type KubevirtCachingStrategy struct { - // Type is the type of the caching strategy - // +kubebuilder:default=None - // +kubebuilder:validation:Enum=None;PVC - Type KubevirtCachingStrategyType `json:"type"` -} - -// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from. -type KubevirtRootVolume struct { - // Image represents what rhcos image to use for the node pool - // - // +optional - Image *KubevirtDiskImage `json:"diskImage,omitempty"` - - // KubevirtVolume represents of type of storage to run the image on - KubevirtVolume `json:",inline"` - - // CacheStrategy defines the boot image caching strategy. Default - no caching - // +optional - CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"` -} - -// KubevirtVolumeType is a specific supported KubeVirt volumes -// -// +kubebuilder:validation:Enum=Persistent -type KubevirtVolumeType string - -const ( - // KubevirtVolumeTypePersistent represents persistent volume for kubevirt VMs - KubevirtVolumeTypePersistent KubevirtVolumeType = "Persistent" -) - -// KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume -type KubevirtVolume struct { - // Type represents the type of storage to associate with the kubevirt VMs. - // - // +optional - // +unionDiscriminator - // +kubebuilder:default=Persistent - Type KubevirtVolumeType `json:"type"` - - // Persistent volume type means the VM's storage is backed by a PVC - // VMs that use persistent volumes can survive disruption events like restart and eviction - // This is the default type used when no storage type is defined. - // - // +optional - Persistent *KubevirtPersistentVolume `json:"persistent,omitempty"` -} - -// KubevirtDiskImage contains values representing where the rhcos image is located -type KubevirtDiskImage struct { - // ContainerDiskImage is a string representing the container image that holds the root disk - // - // +optional - ContainerDiskImage *string `json:"containerDiskImage,omitempty"` -} - -type MultiQueueSetting string - -const ( - MultiQueueEnable MultiQueueSetting = "Enable" - MultiQueueDisable MultiQueueSetting = "Disable" -) - -// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating -// on KubeVirt platform. 
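A minimal sketch (illustrative only, not part of the vendored file) combining the KubeVirt compute and storage types above: an 8Gi, 2-core guest backed by a 32Gi persistent root volume. The hyperv1 import alias is an assumption; the values are the documented defaults spelled out explicitly.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	memory := resource.MustParse("8Gi")
	cores := uint32(2)
	rootSize := resource.MustParse("32Gi")

	// Guest compute sized to the documented defaults.
	compute := hyperv1.KubevirtCompute{
		Memory: &memory,
		Cores:  &cores,
	}
	// Persistent root volume so the VM survives restart and eviction.
	rootVolume := hyperv1.KubevirtRootVolume{
		KubevirtVolume: hyperv1.KubevirtVolume{
			Type: hyperv1.KubevirtVolumeTypePersistent,
			Persistent: &hyperv1.KubevirtPersistentVolume{
				Size: &rootSize,
			},
		},
	}
	fmt.Println(compute.Memory.String(), *compute.Cores, rootVolume.Persistent.Size.String())
}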
-type KubevirtNodePoolPlatform struct { - // RootVolume represents values associated with the VM volume that will host rhcos - // +kubebuilder:default={persistent: {size: "32Gi"}, type: "Persistent"} - RootVolume *KubevirtRootVolume `json:"rootVolume"` - - // Compute contains values representing the virtual hardware requested for the VM - // - // +optional - // +kubebuilder:default={memory: "8Gi", cores: 2} - Compute *KubevirtCompute `json:"compute"` - - // NetworkInterfaceMultiQueue If set to "Enable", virtual network interfaces configured with a virtio bus will also - // enable the vhost multiqueue feature for network devices. The number of queues created depends on additional - // factors of the VirtualMachineInstance, like the number of guest CPUs. - // - // +optional - // +kubebuilder:validation:Enum=Enable;Disable - // +kubebuilder:default=Enable - NetworkInterfaceMultiQueue *MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` - - // AdditionalNetworks specify the extra networks attached to the nodes - // - // +optional - AdditionalNetworks []KubevirtNetwork `json:"additionalNetworks,omitempty"` - - // AttachDefaultNetwork specify if the default pod network should be attached to the nodes - // this can only be set to false if AdditionalNetworks are configured - // - // +optional - // +kubebuilder:default=true - AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` - - // NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. - // Selector which must match a node's labels for the VM to be scheduled on that node. More info: - // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // KubevirtHostDevices specifies the host devices (e.g. GPU devices) to be passed - // from the management cluster, to the nodepool nodes - KubevirtHostDevices []KubevirtHostDevice `json:"hostDevices,omitempty"` -} - -// KubevirtNetwork specifies the configuration for a virtual machine -// network interface -type KubevirtNetwork struct { - // Name specify the network attached to the nodes - // it is a value with the format "[namespace]/[name]" to reference the - // multus network attachment definition - Name string `json:"name"` -} - -type KubevirtHostDevice struct { - // DeviceName is the name of the host device that is desired to be utilized in the HostedCluster's NodePool - // The device can be any supported PCI device, including GPU, either as a passthrough or a vGPU slice. - DeviceName string `json:"deviceName"` - - // Count is the number of instances the specified host device will be attached to each of the - // NodePool's nodes. Default is 1. - // - // +optional - // +kubebuilder:default=1 - // +kubebuilder:validation:Minimum=1 - Count int `json:"count,omitempty"` -} - -// AWSNodePoolPlatform specifies the configuration of a NodePool when operating -// on AWS. -type AWSNodePoolPlatform struct { - // InstanceType is an ec2 instance type for node instances (e.g. m5.large). - InstanceType string `json:"instanceType"` - - // InstanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses. - InstanceProfile string `json:"instanceProfile,omitempty"` - - // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id.startsWith('subnet-') ? 
!has(self.filters) : size(self.filters) > 0", message="subnet is invalid, a valid subnet id or filters must be set, but not both" - // +kubebuilder:validation:Required - // - // Subnet is the subnet to use for node instances. - Subnet AWSResourceReference `json:"subnet,omitempty"` - - // AMI is the image id to use for node instances. If unspecified, the default - // is chosen based on the NodePool release payload image. - // - // +optional - AMI string `json:"ami,omitempty"` - - // SecurityGroups is an optional set of security groups to associate with node - // instances. - // - // +optional - SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` - - // RootVolume specifies configuration for the root volume of node instances. - // - // +optional - RootVolume *Volume `json:"rootVolume,omitempty"` - - // ResourceTags is an optional list of additional tags to apply to AWS node - // instances. - // - // These will be merged with HostedCluster scoped tags, and HostedCluster tags - // take precedence in case of conflicts. - // - // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for - // information on tagging AWS resources. AWS supports a maximum of 50 tags per - // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available - // for the user. - // - // +kubebuilder:validation:MaxItems=25 - // +listType=map - // +listMapKey=key - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` -} - -// AWSResourceReference is a reference to a specific AWS resource by ID or filters. -// Only one of ID or Filters may be specified. Specifying more than one will result in -// a validation error. -type AWSResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - - // Filters is a set of key/value pairs used to identify a resource - // They are applied according to the rules defined by the AWS API: - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html - // +optional - Filters []Filter `json:"filters,omitempty"` -} - -// Filter is a filter used to identify an AWS resource -type Filter struct { - // Name of the filter. Filter names are case-sensitive. - Name string `json:"name"` - - // Values includes one or more filter values. Filter values are case-sensitive. - Values []string `json:"values"` -} - -// Volume specifies the configuration options for node instance storage devices. -type Volume struct { - // Size specifies size (in Gi) of the storage device. - // - // Must be greater than the image snapshot size or 8 (whichever is greater). - // - // +kubebuilder:validation:Minimum=8 - Size int64 `json:"size"` - - // Type is the type of the volume. - Type string `json:"type"` - - // IOPS is the number of IOPS requested for the disk. This is only valid - // for type io1. - // - // +optional - IOPS int64 `json:"iops,omitempty"` - - // Encrypted is whether the volume should be encrypted or not. - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Encrypted is immutable" - Encrypted *bool `json:"encrypted,omitempty"` - - // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. - // If Encrypted is set and this is omitted, the default AWS key will be used. - // The key must already exist and be accessible by the controller. - // +optional - EncryptionKey string `json:"encryptionKey,omitempty"` -} - -// AgentNodePoolPlatform specifies the configuration of a NodePool when operating -// on the Agent platform. 
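For the AWS node pool types above, a short sketch (illustrative only, not part of the vendored file); the instance type, subnet ID, and root volume settings are placeholders, and the hyperv1 import alias is an assumption.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	subnetID := "subnet-0123456789abcdef0" // placeholder

	// m5.large workers in a single existing subnet with a 120 GiB gp3 root volume.
	aws := hyperv1.AWSNodePoolPlatform{
		InstanceType: "m5.large",
		Subnet: hyperv1.AWSResourceReference{
			ID: &subnetID,
		},
		RootVolume: &hyperv1.Volume{
			Size: 120,
			Type: "gp3",
		},
	}
	fmt.Printf("%+v\n", aws)
}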
-type AgentNodePoolPlatform struct { - // AgentLabelSelector contains labels that must be set on an Agent in order to - // be selected for a Machine. - // +optional - AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"` -} - -type AzureNodePoolPlatform struct { - VMSize string `json:"vmsize"` - // ImageID is the id of the image to boot from. If unset, the default image at the location below will be used: - // subscription/$subscriptionID/resourceGroups/$resourceGroupName/providers/Microsoft.Compute/images/rhcos.x86_64.vhd - // +optional - ImageID string `json:"imageID,omitempty"` - // +kubebuilder:default:=120 - // +kubebuilder:validation:Minimum=16 - // +optional - DiskSizeGB int32 `json:"diskSizeGB,omitempty"` - // DiskStorageAccountType is the disk storage account type to use. Valid values are: - // * Standard_LRS: HDD - // * StandardSSD_LRS: Standard SSD - // * Premium_LRS: Premium SSD - // * UltraSSD_LRS: Ultra SSD - // - // Defaults to Premium_LRS. For more details, visit the Azure documentation: - // https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison - // - // +kubebuilder:default:=Premium_LRS - // +kubebuilder:validation:Enum=Standard_LRS;StandardSSD_LRS;Premium_LRS;UltraSSD_LRS - // +optional - DiskStorageAccountType string `json:"diskStorageAccountType,omitempty"` - // AvailabilityZone of the nodepool. Must not be specified for clusters - // in a location that does not support AvailabilityZone. - // +optional - AvailabilityZone string `json:"availabilityZone,omitempty"` - // DiskEncryptionSetID is the ID of the DiskEncryptionSet resource to use to encrypt the OS disks for the VMs. - // +optional - DiskEncryptionSetID string `json:"diskEncryptionSetID,omitempty"` - // EnableEphemeralOSDisk enables ephemeral OS disk - // +optional - EnableEphemeralOSDisk bool `json:"enableEphemeralOSDisk,omitempty"` - // SubnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a - // different subnet than the one listed in the HostedCluster, hcluster.Spec.Platform.Azure.SubnetID, but must exist - // in the same hcluster.Spec.Platform.Azure.VnetID and must exist under the same subscription ID, - // hcluster.Spec.Platform.Azure.SubscriptionID. - // - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" - // +kubebuilder:validation:Required - // +immutable - // +required - SubnetID string `json:"subnetID"` - // Diagnostics specifies the diagnostics settings for a virtual machine. - // If not specified then Boot diagnostics will be disabled. - // +optional - Diagnostics *Diagnostics `json:"diagnostics,omitempty"` - MachineIdentityID string `json:"machineIdentityID"` -} - -// We define our own condition type since metav1.Condition has validation -// for Reason that might be broken by what we bubble up from CAPI. -// NodePoolCondition defines an observation of NodePool resource operational state. -type NodePoolCondition struct { - // Type of condition in CamelCase or in foo.example.com/CamelCase. - // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - // can be useful (see .node.status.conditions), the ability to deconflict is important. - Type string `json:"type"` - - // Status of the condition, one of True, False, Unknown.
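A compact sketch (illustrative only, not part of the vendored file) of the Azure node pool shape above, with Azure-managed boot diagnostics; the VM size and subnet ID are placeholders and the hyperv1 import alias is an assumption.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1alpha1"
)

func main() {
	// A Standard_D4s_v4 worker profile on an existing subnet with the
	// default 120 GiB OS disk and Azure-managed boot diagnostics.
	azure := hyperv1.AzureNodePoolPlatform{
		VMSize:     "Standard_D4s_v4",
		DiskSizeGB: 120,
		SubnetID:   "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.Network/virtualNetworks/example-vnet/subnets/example-subnet", // placeholder
		Diagnostics: &hyperv1.Diagnostics{
			StorageAccountType: "Managed",
		},
	}
	fmt.Printf("%+v\n", azure)
}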
- Status corev1.ConditionStatus `json:"status"` - - // Severity provides an explicit classification of Reason code, so the users or machines can immediately - // understand the current situation and act accordingly. - // The Severity field MUST be set only when Status=False. - // +optional - Severity string `json:"severity,omitempty"` - - // Last time the condition transitioned from one status to another. - // This should be when the underlying condition changed. If that is not known, then using the time when - // the API field changed is acceptable. - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - - // The reason for the condition's last transition in CamelCase. - // The specific API may choose whether or not this field is considered a guaranteed API. - // This field may not be empty. - // +optional - Reason string `json:"reason,omitempty"` - - // A human readable message indicating details about the transition. - // This field may be empty. - // +optional - Message string `json:"message,omitempty"` - - // +kubebuilder:validation:Minimum=0 - ObservedGeneration int64 `json:"observedGeneration,omitempty"` -} - -// NodePoolPlatformStatus contains specific platform statuses -type NodePoolPlatformStatus struct { - // KubeVirt contains the KubeVirt platform statuses - // +optional - KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"` -} - -// KubeVirtNodePoolStatus contains the KubeVirt platform statuses -type KubeVirtNodePoolStatus struct { - // CacheName holds the name of the cache DataVolume, if it exists - // +optional - CacheName string `json:"cacheName,omitempty"` - - // Credentials shows the client credentials used when creating KubeVirt virtual machines. - // This field only exists when the KubeVirt virtual machines are being placed - // on a cluster separate from the one hosting the Hosted Control Plane components. - // - // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on - // the same cluster and namespace as the Hosted Control Plane. - // +optional - Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` -} - -// Taint is as v1 Core but without TimeAdded. -// https://github.com/kubernetes/kubernetes/blob/ed8cad1e80d096257921908a52ac69cf1f41a098/staging/src/k8s.io/api/core/v1/types.go#L3037-L3053 -type Taint struct { - // Required. The taint key to be applied to a node. - Key string `json:"key"` - // The taint value corresponding to the taint key. - // +optional - Value string `json:"value,omitempty"` - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect corev1.TaintEffect `json:"effect"` -} - -// Diagnostics specifies the diagnostics settings for a virtual machine. -// +kubebuilder:validation:XValidation:rule="self.storageAccountType == 'UserManaged' ? has(self.storageAccountURI) : true", message="storageAccountURI is required when storageAccountType is UserManaged" -type Diagnostics struct { - // StorageAccountType determines if the storage account for storing the diagnostics data - // should be disabled (Disabled), provisioned by Azure (Managed) or by the user (UserManaged). - // +kubebuilder:validation:Enum=Managed;UserManaged;Disabled - // +kubebuilder:default:=Disabled - StorageAccountType string `json:"storageAccountType,omitempty"` - // StorageAccountURI is the URI of the user-managed storage account.
- // The URI typically will be `https://.blob.core.windows.net/` - // but may differ if you are using Azure DNS zone endpoints. - // You can find the correct endpoint by looking for the Blob Primary Endpoint in the - // endpoints tab in the Azure console or with the CLI by issuing - // `az storage account list --query='[].{name: name, "resource group": resourceGroup, "blob endpoint": primaryEndpoints.blob}'`. - // +kubebuilder:validation:Format=uri - // +kubebuilder:validation:MaxLength=1024 - // +optional - StorageAccountURI string `json:"storageAccountURI,omitempty"` -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index b0a76b142..000000000 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,2599 +0,0 @@ -//go:build !ignore_autogenerated - -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - configv1 "github.com/openshift/api/config/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AESCBCSpec) DeepCopyInto(out *AESCBCSpec) { - *out = *in - out.ActiveKey = in.ActiveKey - if in.BackupKey != nil { - in, out := &in.BackupKey, &out.BackupKey - *out = new(corev1.LocalObjectReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESCBCSpec. -func (in *AESCBCSpec) DeepCopy() *AESCBCSpec { - if in == nil { - return nil - } - out := new(AESCBCSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. -func (in *APIEndpoint) DeepCopy() *APIEndpoint { - if in == nil { - return nil - } - out := new(APIEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerNetworking) DeepCopyInto(out *APIServerNetworking) { - *out = *in - if in.AdvertiseAddress != nil { - in, out := &in.AdvertiseAddress, &out.AdvertiseAddress - *out = new(string) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - if in.AllowedCIDRBlocks != nil { - in, out := &in.AllowedCIDRBlocks, &out.AllowedCIDRBlocks - *out = make([]CIDRBlock, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNetworking. 
-func (in *APIServerNetworking) DeepCopy() *APIServerNetworking { - if in == nil { - return nil - } - out := new(APIServerNetworking) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSCloudProviderConfig) DeepCopyInto(out *AWSCloudProviderConfig) { - *out = *in - if in.Subnet != nil { - in, out := &in.Subnet, &out.Subnet - *out = new(AWSResourceReference) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCloudProviderConfig. -func (in *AWSCloudProviderConfig) DeepCopy() *AWSCloudProviderConfig { - if in == nil { - return nil - } - out := new(AWSCloudProviderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEndpointService) DeepCopyInto(out *AWSEndpointService) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointService. -func (in *AWSEndpointService) DeepCopy() *AWSEndpointService { - if in == nil { - return nil - } - out := new(AWSEndpointService) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSEndpointService) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEndpointServiceList) DeepCopyInto(out *AWSEndpointServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSEndpointService, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceList. -func (in *AWSEndpointServiceList) DeepCopy() *AWSEndpointServiceList { - if in == nil { - return nil - } - out := new(AWSEndpointServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSEndpointServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEndpointServiceSpec) DeepCopyInto(out *AWSEndpointServiceSpec) { - *out = *in - if in.SubnetIDs != nil { - in, out := &in.SubnetIDs, &out.SubnetIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AWSResourceTag, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceSpec. 
-func (in *AWSEndpointServiceSpec) DeepCopy() *AWSEndpointServiceSpec { - if in == nil { - return nil - } - out := new(AWSEndpointServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEndpointServiceStatus) DeepCopyInto(out *AWSEndpointServiceStatus) { - *out = *in - if in.DNSNames != nil { - in, out := &in.DNSNames, &out.DNSNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEndpointServiceStatus. -func (in *AWSEndpointServiceStatus) DeepCopy() *AWSEndpointServiceStatus { - if in == nil { - return nil - } - out := new(AWSEndpointServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSKMSAuthSpec) DeepCopyInto(out *AWSKMSAuthSpec) { - *out = *in - out.Credentials = in.Credentials -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSAuthSpec. -func (in *AWSKMSAuthSpec) DeepCopy() *AWSKMSAuthSpec { - if in == nil { - return nil - } - out := new(AWSKMSAuthSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSKMSKeyEntry) DeepCopyInto(out *AWSKMSKeyEntry) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSKeyEntry. -func (in *AWSKMSKeyEntry) DeepCopy() *AWSKMSKeyEntry { - if in == nil { - return nil - } - out := new(AWSKMSKeyEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSKMSSpec) DeepCopyInto(out *AWSKMSSpec) { - *out = *in - out.ActiveKey = in.ActiveKey - if in.BackupKey != nil { - in, out := &in.BackupKey, &out.BackupKey - *out = new(AWSKMSKeyEntry) - **out = **in - } - out.Auth = in.Auth -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSSpec. -func (in *AWSKMSSpec) DeepCopy() *AWSKMSSpec { - if in == nil { - return nil - } - out := new(AWSKMSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSNodePoolPlatform) DeepCopyInto(out *AWSNodePoolPlatform) { - *out = *in - in.Subnet.DeepCopyInto(&out.Subnet) - if in.SecurityGroups != nil { - in, out := &in.SecurityGroups, &out.SecurityGroups - *out = make([]AWSResourceReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RootVolume != nil { - in, out := &in.RootVolume, &out.RootVolume - *out = new(Volume) - (*in).DeepCopyInto(*out) - } - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AWSResourceTag, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNodePoolPlatform. 
-func (in *AWSNodePoolPlatform) DeepCopy() *AWSNodePoolPlatform { - if in == nil { - return nil - } - out := new(AWSNodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { - *out = *in - if in.CloudProviderConfig != nil { - in, out := &in.CloudProviderConfig, &out.CloudProviderConfig - *out = new(AWSCloudProviderConfig) - (*in).DeepCopyInto(*out) - } - if in.ServiceEndpoints != nil { - in, out := &in.ServiceEndpoints, &out.ServiceEndpoints - *out = make([]AWSServiceEndpoint, len(*in)) - copy(*out, *in) - } - out.RolesRef = in.RolesRef - if in.Roles != nil { - in, out := &in.Roles, &out.Roles - *out = make([]AWSRoleCredentials, len(*in)) - copy(*out, *in) - } - out.KubeCloudControllerCreds = in.KubeCloudControllerCreds - out.NodePoolManagementCreds = in.NodePoolManagementCreds - out.ControlPlaneOperatorCreds = in.ControlPlaneOperatorCreds - if in.ResourceTags != nil { - in, out := &in.ResourceTags, &out.ResourceTags - *out = make([]AWSResourceTag, len(*in)) - copy(*out, *in) - } - if in.AdditionalAllowedPrincipals != nil { - in, out := &in.AdditionalAllowedPrincipals, &out.AdditionalAllowedPrincipals - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. -func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { - if in == nil { - return nil - } - out := new(AWSPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. -func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { - if in == nil { - return nil - } - out := new(AWSPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { - *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.Filters != nil { - in, out := &in.Filters, &out.Filters - *out = make([]Filter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. -func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { - if in == nil { - return nil - } - out := new(AWSResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. -func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { - if in == nil { - return nil - } - out := new(AWSResourceTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AWSRoleCredentials) DeepCopyInto(out *AWSRoleCredentials) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRoleCredentials. -func (in *AWSRoleCredentials) DeepCopy() *AWSRoleCredentials { - if in == nil { - return nil - } - out := new(AWSRoleCredentials) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSRolesRef) DeepCopyInto(out *AWSRolesRef) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRolesRef. -func (in *AWSRolesRef) DeepCopy() *AWSRolesRef { - if in == nil { - return nil - } - out := new(AWSRolesRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. -func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { - if in == nil { - return nil - } - out := new(AWSServiceEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentNodePoolPlatform) DeepCopyInto(out *AgentNodePoolPlatform) { - *out = *in - if in.AgentLabelSelector != nil { - in, out := &in.AgentLabelSelector, &out.AgentLabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentNodePoolPlatform. -func (in *AgentNodePoolPlatform) DeepCopy() *AgentNodePoolPlatform { - if in == nil { - return nil - } - out := new(AgentNodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentPlatformSpec) DeepCopyInto(out *AgentPlatformSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentPlatformSpec. -func (in *AgentPlatformSpec) DeepCopy() *AgentPlatformSpec { - if in == nil { - return nil - } - out := new(AgentPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureKMSKey) DeepCopyInto(out *AzureKMSKey) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKMSKey. -func (in *AzureKMSKey) DeepCopy() *AzureKMSKey { - if in == nil { - return nil - } - out := new(AzureKMSKey) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureKMSSpec) DeepCopyInto(out *AzureKMSSpec) { - *out = *in - out.ActiveKey = in.ActiveKey - if in.BackupKey != nil { - in, out := &in.BackupKey, &out.BackupKey - *out = new(AzureKMSKey) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKMSSpec. 
-func (in *AzureKMSSpec) DeepCopy() *AzureKMSSpec { - if in == nil { - return nil - } - out := new(AzureKMSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureNodePoolPlatform) DeepCopyInto(out *AzureNodePoolPlatform) { - *out = *in - if in.Diagnostics != nil { - in, out := &in.Diagnostics, &out.Diagnostics - *out = new(Diagnostics) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNodePoolPlatform. -func (in *AzureNodePoolPlatform) DeepCopy() *AzureNodePoolPlatform { - if in == nil { - return nil - } - out := new(AzureNodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { - *out = *in - out.Credentials = in.Credentials -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. -func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { - if in == nil { - return nil - } - out := new(AzurePlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterAutoscaling) DeepCopyInto(out *ClusterAutoscaling) { - *out = *in - if in.MaxNodesTotal != nil { - in, out := &in.MaxNodesTotal, &out.MaxNodesTotal - *out = new(int32) - **out = **in - } - if in.MaxPodGracePeriod != nil { - in, out := &in.MaxPodGracePeriod, &out.MaxPodGracePeriod - *out = new(int32) - **out = **in - } - if in.PodPriorityThreshold != nil { - in, out := &in.PodPriorityThreshold, &out.PodPriorityThreshold - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaling. -func (in *ClusterAutoscaling) DeepCopy() *ClusterAutoscaling { - if in == nil { - return nil - } - out := new(ClusterAutoscaling) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) { - *out = *in - if in.SecretRefs != nil { - in, out := &in.SecretRefs, &out.SecretRefs - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.ConfigMapRefs != nil { - in, out := &in.ConfigMapRefs, &out.ConfigMapRefs - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.APIServer != nil { - in, out := &in.APIServer, &out.APIServer - *out = new(configv1.APIServerSpec) - (*in).DeepCopyInto(*out) - } - if in.Authentication != nil { - in, out := &in.Authentication, &out.Authentication - *out = new(configv1.AuthenticationSpec) - (*in).DeepCopyInto(*out) - } - if in.FeatureGate != nil { - in, out := &in.FeatureGate, &out.FeatureGate - *out = new(configv1.FeatureGateSpec) - (*in).DeepCopyInto(*out) - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(configv1.ImageSpec) - (*in).DeepCopyInto(*out) - } - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = new(configv1.IngressSpec) - (*in).DeepCopyInto(*out) - } - if in.Network != nil { - in, out := &in.Network, &out.Network - *out = new(configv1.NetworkSpec) - (*in).DeepCopyInto(*out) - } - if in.OAuth != nil { - in, out := &in.OAuth, &out.OAuth - *out = new(configv1.OAuthSpec) - (*in).DeepCopyInto(*out) - } - if in.OperatorHub != nil { - in, out := &in.OperatorHub, &out.OperatorHub - *out = new(configv1.OperatorHubSpec) - (*in).DeepCopyInto(*out) - } - if in.Scheduler != nil { - in, out := &in.Scheduler, &out.Scheduler - *out = new(configv1.SchedulerSpec) - **out = **in - } - if in.Proxy != nil { - in, out := &in.Proxy, &out.Proxy - *out = new(configv1.ProxySpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration. -func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration { - if in == nil { - return nil - } - out := new(ClusterConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { - *out = *in - in.CIDR.DeepCopyInto(&out.CIDR) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. -func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { - if in == nil { - return nil - } - out := new(ClusterNetworkEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterNetworking) DeepCopyInto(out *ClusterNetworking) { - *out = *in - if in.MachineNetwork != nil { - in, out := &in.MachineNetwork, &out.MachineNetwork - *out = make([]MachineNetworkEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ClusterNetwork != nil { - in, out := &in.ClusterNetwork, &out.ClusterNetwork - *out = make([]ClusterNetworkEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ServiceNetwork != nil { - in, out := &in.ServiceNetwork, &out.ServiceNetwork - *out = make([]ServiceNetworkEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.APIServer != nil { - in, out := &in.APIServer, &out.APIServer - *out = new(APIServerNetworking) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworking. -func (in *ClusterNetworking) DeepCopy() *ClusterNetworking { - if in == nil { - return nil - } - out := new(ClusterNetworking) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { - *out = *in - in.Desired.DeepCopyInto(&out.Desired) - if in.History != nil { - in, out := &in.History, &out.History - *out = make([]configv1.UpdateHistory, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AvailableUpdates != nil { - in, out := &in.AvailableUpdates, &out.AvailableUpdates - *out = make([]configv1.Release, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConditionalUpdates != nil { - in, out := &in.ConditionalUpdates, &out.ConditionalUpdates - *out = make([]configv1.ConditionalUpdate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. -func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { - if in == nil { - return nil - } - out := new(ClusterVersionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { - *out = *in - if in.BaseDomainPrefix != nil { - in, out := &in.BaseDomainPrefix, &out.BaseDomainPrefix - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. -func (in *DNSSpec) DeepCopy() *DNSSpec { - if in == nil { - return nil - } - out := new(DNSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Diagnostics) DeepCopyInto(out *Diagnostics) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Diagnostics. -func (in *Diagnostics) DeepCopy() *Diagnostics { - if in == nil { - return nil - } - out := new(Diagnostics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { - *out = *in - if in.Managed != nil { - in, out := &in.Managed, &out.Managed - *out = new(ManagedEtcdSpec) - (*in).DeepCopyInto(*out) - } - if in.Unmanaged != nil { - in, out := &in.Unmanaged, &out.Unmanaged - *out = new(UnmanagedEtcdSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. -func (in *EtcdSpec) DeepCopy() *EtcdSpec { - if in == nil { - return nil - } - out := new(EtcdSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdTLSConfig) DeepCopyInto(out *EtcdTLSConfig) { - *out = *in - out.ClientSecret = in.ClientSecret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdTLSConfig. -func (in *EtcdTLSConfig) DeepCopy() *EtcdTLSConfig { - if in == nil { - return nil - } - out := new(EtcdTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Filter) DeepCopyInto(out *Filter) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. -func (in *Filter) DeepCopy() *Filter { - if in == nil { - return nil - } - out := new(Filter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostedCluster) DeepCopyInto(out *HostedCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedCluster. -func (in *HostedCluster) DeepCopy() *HostedCluster { - if in == nil { - return nil - } - out := new(HostedCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostedCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostedClusterList) DeepCopyInto(out *HostedClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HostedCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterList. -func (in *HostedClusterList) DeepCopy() *HostedClusterList { - if in == nil { - return nil - } - out := new(HostedClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostedClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostedClusterSpec) DeepCopyInto(out *HostedClusterSpec) { - *out = *in - out.Release = in.Release - if in.ControlPlaneRelease != nil { - in, out := &in.ControlPlaneRelease, &out.ControlPlaneRelease - *out = new(Release) - **out = **in - } - in.Platform.DeepCopyInto(&out.Platform) - in.DNS.DeepCopyInto(&out.DNS) - in.Networking.DeepCopyInto(&out.Networking) - in.Autoscaling.DeepCopyInto(&out.Autoscaling) - in.Etcd.DeepCopyInto(&out.Etcd) - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]ServicePublishingStrategyMapping, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.PullSecret = in.PullSecret - out.SSHKey = in.SSHKey - if in.ServiceAccountSigningKey != nil { - in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.Configuration != nil { - in, out := &in.Configuration, &out.Configuration - *out = new(ClusterConfiguration) - (*in).DeepCopyInto(*out) - } - if in.AuditWebhook != nil { - in, out := &in.AuditWebhook, &out.AuditWebhook - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.ImageContentSources != nil { - in, out := &in.ImageContentSources, &out.ImageContentSources - *out = make([]ImageContentSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalTrustBundle != nil { - in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.SecretEncryption != nil { - in, out := &in.SecretEncryption, &out.SecretEncryption - *out = new(SecretEncryptionSpec) - (*in).DeepCopyInto(*out) - } - if in.PausedUntil != nil { - in, out := &in.PausedUntil, &out.PausedUntil - *out = new(string) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterSpec. -func (in *HostedClusterSpec) DeepCopy() *HostedClusterSpec { - if in == nil { - return nil - } - out := new(HostedClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostedClusterStatus) DeepCopyInto(out *HostedClusterStatus) { - *out = *in - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(ClusterVersionStatus) - (*in).DeepCopyInto(*out) - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.KubeadminPassword != nil { - in, out := &in.KubeadminPassword, &out.KubeadminPassword - *out = new(corev1.LocalObjectReference) - **out = **in - } - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(PlatformStatus) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterStatus. -func (in *HostedClusterStatus) DeepCopy() *HostedClusterStatus { - if in == nil { - return nil - } - out := new(HostedClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostedControlPlane) DeepCopyInto(out *HostedControlPlane) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlane. -func (in *HostedControlPlane) DeepCopy() *HostedControlPlane { - if in == nil { - return nil - } - out := new(HostedControlPlane) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostedControlPlane) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostedControlPlaneList) DeepCopyInto(out *HostedControlPlaneList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HostedControlPlane, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneList. -func (in *HostedControlPlaneList) DeepCopy() *HostedControlPlaneList { - if in == nil { - return nil - } - out := new(HostedControlPlaneList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HostedControlPlaneList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostedControlPlaneSpec) DeepCopyInto(out *HostedControlPlaneSpec) { - *out = *in - if in.ControlPlaneReleaseImage != nil { - in, out := &in.ControlPlaneReleaseImage, &out.ControlPlaneReleaseImage - *out = new(string) - **out = **in - } - out.PullSecret = in.PullSecret - in.Networking.DeepCopyInto(&out.Networking) - out.SSHKey = in.SSHKey - in.Platform.DeepCopyInto(&out.Platform) - in.DNS.DeepCopyInto(&out.DNS) - if in.ServiceAccountSigningKey != nil { - in, out := &in.ServiceAccountSigningKey, &out.ServiceAccountSigningKey - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.APIPort != nil { - in, out := &in.APIPort, &out.APIPort - *out = new(int32) - **out = **in - } - if in.APIAdvertiseAddress != nil { - in, out := &in.APIAdvertiseAddress, &out.APIAdvertiseAddress - *out = new(string) - **out = **in - } - if in.APIAllowedCIDRBlocks != nil { - in, out := &in.APIAllowedCIDRBlocks, &out.APIAllowedCIDRBlocks - *out = make([]CIDRBlock, len(*in)) - copy(*out, *in) - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(KubeconfigSecretRef) - **out = **in - } - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]ServicePublishingStrategyMapping, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AuditWebhook != nil { - in, out := &in.AuditWebhook, &out.AuditWebhook - *out = new(corev1.LocalObjectReference) - **out = **in - } - in.Etcd.DeepCopyInto(&out.Etcd) - if in.Configuration != nil { - in, out := &in.Configuration, &out.Configuration - *out = new(ClusterConfiguration) - (*in).DeepCopyInto(*out) - } - if in.ImageContentSources != nil { - in, out := &in.ImageContentSources, &out.ImageContentSources - *out = make([]ImageContentSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalTrustBundle != nil { - in, out := &in.AdditionalTrustBundle, &out.AdditionalTrustBundle - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.SecretEncryption != nil { - in, out := &in.SecretEncryption, &out.SecretEncryption - *out = new(SecretEncryptionSpec) - (*in).DeepCopyInto(*out) - } - if in.PausedUntil != nil { - in, out := &in.PausedUntil, &out.PausedUntil - *out = new(string) - **out = **in - } - in.Autoscaling.DeepCopyInto(&out.Autoscaling) - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneSpec. -func (in *HostedControlPlaneSpec) DeepCopy() *HostedControlPlaneSpec { - if in == nil { - return nil - } - out := new(HostedControlPlaneSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HostedControlPlaneStatus) DeepCopyInto(out *HostedControlPlaneStatus) { - *out = *in - if in.ExternalManagedControlPlane != nil { - in, out := &in.ExternalManagedControlPlane, &out.ExternalManagedControlPlane - *out = new(bool) - **out = **in - } - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint - if in.VersionStatus != nil { - in, out := &in.VersionStatus, &out.VersionStatus - *out = new(ClusterVersionStatus) - (*in).DeepCopyInto(*out) - } - if in.LastReleaseImageTransitionTime != nil { - in, out := &in.LastReleaseImageTransitionTime, &out.LastReleaseImageTransitionTime - *out = (*in).DeepCopy() - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = new(KubeconfigSecretRef) - **out = **in - } - if in.KubeadminPassword != nil { - in, out := &in.KubeadminPassword, &out.KubeadminPassword - *out = new(corev1.LocalObjectReference) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(PlatformStatus) - (*in).DeepCopyInto(*out) - } - if in.NodeCount != nil { - in, out := &in.NodeCount, &out.NodeCount - *out = new(int) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneStatus. -func (in *HostedControlPlaneStatus) DeepCopy() *HostedControlPlaneStatus { - if in == nil { - return nil - } - out := new(HostedControlPlaneStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudKMSAuthSpec) DeepCopyInto(out *IBMCloudKMSAuthSpec) { - *out = *in - if in.Unmanaged != nil { - in, out := &in.Unmanaged, &out.Unmanaged - *out = new(IBMCloudKMSUnmanagedAuthSpec) - **out = **in - } - if in.Managed != nil { - in, out := &in.Managed, &out.Managed - *out = new(IBMCloudKMSManagedAuthSpec) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSAuthSpec. -func (in *IBMCloudKMSAuthSpec) DeepCopy() *IBMCloudKMSAuthSpec { - if in == nil { - return nil - } - out := new(IBMCloudKMSAuthSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudKMSKeyEntry) DeepCopyInto(out *IBMCloudKMSKeyEntry) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSKeyEntry. -func (in *IBMCloudKMSKeyEntry) DeepCopy() *IBMCloudKMSKeyEntry { - if in == nil { - return nil - } - out := new(IBMCloudKMSKeyEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudKMSManagedAuthSpec) DeepCopyInto(out *IBMCloudKMSManagedAuthSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSManagedAuthSpec. -func (in *IBMCloudKMSManagedAuthSpec) DeepCopy() *IBMCloudKMSManagedAuthSpec { - if in == nil { - return nil - } - out := new(IBMCloudKMSManagedAuthSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IBMCloudKMSSpec) DeepCopyInto(out *IBMCloudKMSSpec) { - *out = *in - in.Auth.DeepCopyInto(&out.Auth) - if in.KeyList != nil { - in, out := &in.KeyList, &out.KeyList - *out = make([]IBMCloudKMSKeyEntry, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSSpec. -func (in *IBMCloudKMSSpec) DeepCopy() *IBMCloudKMSSpec { - if in == nil { - return nil - } - out := new(IBMCloudKMSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopyInto(out *IBMCloudKMSUnmanagedAuthSpec) { - *out = *in - out.Credentials = in.Credentials -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudKMSUnmanagedAuthSpec. -func (in *IBMCloudKMSUnmanagedAuthSpec) DeepCopy() *IBMCloudKMSUnmanagedAuthSpec { - if in == nil { - return nil - } - out := new(IBMCloudKMSUnmanagedAuthSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. -func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { - if in == nil { - return nil - } - out := new(IBMCloudPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageContentSource) DeepCopyInto(out *ImageContentSource) { - *out = *in - if in.Mirrors != nil { - in, out := &in.Mirrors, &out.Mirrors - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSource. -func (in *ImageContentSource) DeepCopy() *ImageContentSource { - if in == nil { - return nil - } - out := new(ImageContentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InPlaceUpgrade) DeepCopyInto(out *InPlaceUpgrade) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpgrade. -func (in *InPlaceUpgrade) DeepCopy() *InPlaceUpgrade { - if in == nil { - return nil - } - out := new(InPlaceUpgrade) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KMSSpec) DeepCopyInto(out *KMSSpec) { - *out = *in - if in.IBMCloud != nil { - in, out := &in.IBMCloud, &out.IBMCloud - *out = new(IBMCloudKMSSpec) - (*in).DeepCopyInto(*out) - } - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSKMSSpec) - (*in).DeepCopyInto(*out) - } - if in.Azure != nil { - in, out := &in.Azure, &out.Azure - *out = new(AzureKMSSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSSpec. 
-func (in *KMSSpec) DeepCopy() *KMSSpec { - if in == nil { - return nil - } - out := new(KMSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeVirtNodePoolStatus) DeepCopyInto(out *KubeVirtNodePoolStatus) { - *out = *in - if in.Credentials != nil { - in, out := &in.Credentials, &out.Credentials - *out = new(KubevirtPlatformCredentials) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtNodePoolStatus. -func (in *KubeVirtNodePoolStatus) DeepCopy() *KubeVirtNodePoolStatus { - if in == nil { - return nil - } - out := new(KubeVirtNodePoolStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeconfigSecretRef) DeepCopyInto(out *KubeconfigSecretRef) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigSecretRef. -func (in *KubeconfigSecretRef) DeepCopy() *KubeconfigSecretRef { - if in == nil { - return nil - } - out := new(KubeconfigSecretRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtCachingStrategy) DeepCopyInto(out *KubevirtCachingStrategy) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCachingStrategy. -func (in *KubevirtCachingStrategy) DeepCopy() *KubevirtCachingStrategy { - if in == nil { - return nil - } - out := new(KubevirtCachingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtCompute) DeepCopyInto(out *KubevirtCompute) { - *out = *in - if in.Memory != nil { - in, out := &in.Memory, &out.Memory - x := (*in).DeepCopy() - *out = &x - } - if in.Cores != nil { - in, out := &in.Cores, &out.Cores - *out = new(uint32) - **out = **in - } - if in.QosClass != nil { - in, out := &in.QosClass, &out.QosClass - *out = new(QoSClass) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCompute. -func (in *KubevirtCompute) DeepCopy() *KubevirtCompute { - if in == nil { - return nil - } - out := new(KubevirtCompute) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtDiskImage) DeepCopyInto(out *KubevirtDiskImage) { - *out = *in - if in.ContainerDiskImage != nil { - in, out := &in.ContainerDiskImage, &out.ContainerDiskImage - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtDiskImage. -func (in *KubevirtDiskImage) DeepCopy() *KubevirtDiskImage { - if in == nil { - return nil - } - out := new(KubevirtDiskImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtHostDevice) DeepCopyInto(out *KubevirtHostDevice) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtHostDevice. 
-func (in *KubevirtHostDevice) DeepCopy() *KubevirtHostDevice { - if in == nil { - return nil - } - out := new(KubevirtHostDevice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtManualStorageDriverConfig) DeepCopyInto(out *KubevirtManualStorageDriverConfig) { - *out = *in - if in.StorageClassMapping != nil { - in, out := &in.StorageClassMapping, &out.StorageClassMapping - *out = make([]KubevirtStorageClassMapping, len(*in)) - copy(*out, *in) - } - if in.VolumeSnapshotClassMapping != nil { - in, out := &in.VolumeSnapshotClassMapping, &out.VolumeSnapshotClassMapping - *out = make([]KubevirtVolumeSnapshotClassMapping, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtManualStorageDriverConfig. -func (in *KubevirtManualStorageDriverConfig) DeepCopy() *KubevirtManualStorageDriverConfig { - if in == nil { - return nil - } - out := new(KubevirtManualStorageDriverConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtNetwork) DeepCopyInto(out *KubevirtNetwork) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNetwork. -func (in *KubevirtNetwork) DeepCopy() *KubevirtNetwork { - if in == nil { - return nil - } - out := new(KubevirtNetwork) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtNodePoolPlatform) DeepCopyInto(out *KubevirtNodePoolPlatform) { - *out = *in - if in.RootVolume != nil { - in, out := &in.RootVolume, &out.RootVolume - *out = new(KubevirtRootVolume) - (*in).DeepCopyInto(*out) - } - if in.Compute != nil { - in, out := &in.Compute, &out.Compute - *out = new(KubevirtCompute) - (*in).DeepCopyInto(*out) - } - if in.NetworkInterfaceMultiQueue != nil { - in, out := &in.NetworkInterfaceMultiQueue, &out.NetworkInterfaceMultiQueue - *out = new(MultiQueueSetting) - **out = **in - } - if in.AdditionalNetworks != nil { - in, out := &in.AdditionalNetworks, &out.AdditionalNetworks - *out = make([]KubevirtNetwork, len(*in)) - copy(*out, *in) - } - if in.AttachDefaultNetwork != nil { - in, out := &in.AttachDefaultNetwork, &out.AttachDefaultNetwork - *out = new(bool) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.KubevirtHostDevices != nil { - in, out := &in.KubevirtHostDevices, &out.KubevirtHostDevices - *out = make([]KubevirtHostDevice, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodePoolPlatform. -func (in *KubevirtNodePoolPlatform) DeepCopy() *KubevirtNodePoolPlatform { - if in == nil { - return nil - } - out := new(KubevirtNodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubevirtPersistentVolume) DeepCopyInto(out *KubevirtPersistentVolume) { - *out = *in - if in.Size != nil { - in, out := &in.Size, &out.Size - x := (*in).DeepCopy() - *out = &x - } - if in.StorageClass != nil { - in, out := &in.StorageClass, &out.StorageClass - *out = new(string) - **out = **in - } - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.VolumeMode != nil { - in, out := &in.VolumeMode, &out.VolumeMode - *out = new(corev1.PersistentVolumeMode) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPersistentVolume. -func (in *KubevirtPersistentVolume) DeepCopy() *KubevirtPersistentVolume { - if in == nil { - return nil - } - out := new(KubevirtPersistentVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtPlatformCredentials) DeepCopyInto(out *KubevirtPlatformCredentials) { - *out = *in - if in.InfraKubeConfigSecret != nil { - in, out := &in.InfraKubeConfigSecret, &out.InfraKubeConfigSecret - *out = new(KubeconfigSecretRef) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformCredentials. -func (in *KubevirtPlatformCredentials) DeepCopy() *KubevirtPlatformCredentials { - if in == nil { - return nil - } - out := new(KubevirtPlatformCredentials) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { - *out = *in - if in.BaseDomainPassthrough != nil { - in, out := &in.BaseDomainPassthrough, &out.BaseDomainPassthrough - *out = new(bool) - **out = **in - } - if in.Credentials != nil { - in, out := &in.Credentials, &out.Credentials - *out = new(KubevirtPlatformCredentials) - (*in).DeepCopyInto(*out) - } - if in.StorageDriver != nil { - in, out := &in.StorageDriver, &out.StorageDriver - *out = new(KubevirtStorageDriverSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. -func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { - if in == nil { - return nil - } - out := new(KubevirtPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtRootVolume) DeepCopyInto(out *KubevirtRootVolume) { - *out = *in - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(KubevirtDiskImage) - (*in).DeepCopyInto(*out) - } - in.KubevirtVolume.DeepCopyInto(&out.KubevirtVolume) - if in.CacheStrategy != nil { - in, out := &in.CacheStrategy, &out.CacheStrategy - *out = new(KubevirtCachingStrategy) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtRootVolume. -func (in *KubevirtRootVolume) DeepCopy() *KubevirtRootVolume { - if in == nil { - return nil - } - out := new(KubevirtRootVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubevirtStorageClassMapping) DeepCopyInto(out *KubevirtStorageClassMapping) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtStorageClassMapping. -func (in *KubevirtStorageClassMapping) DeepCopy() *KubevirtStorageClassMapping { - if in == nil { - return nil - } - out := new(KubevirtStorageClassMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtStorageDriverSpec) DeepCopyInto(out *KubevirtStorageDriverSpec) { - *out = *in - if in.Manual != nil { - in, out := &in.Manual, &out.Manual - *out = new(KubevirtManualStorageDriverConfig) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtStorageDriverSpec. -func (in *KubevirtStorageDriverSpec) DeepCopy() *KubevirtStorageDriverSpec { - if in == nil { - return nil - } - out := new(KubevirtStorageDriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtVolume) DeepCopyInto(out *KubevirtVolume) { - *out = *in - if in.Persistent != nil { - in, out := &in.Persistent, &out.Persistent - *out = new(KubevirtPersistentVolume) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtVolume. -func (in *KubevirtVolume) DeepCopy() *KubevirtVolume { - if in == nil { - return nil - } - out := new(KubevirtVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtVolumeSnapshotClassMapping) DeepCopyInto(out *KubevirtVolumeSnapshotClassMapping) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtVolumeSnapshotClassMapping. -func (in *KubevirtVolumeSnapshotClassMapping) DeepCopy() *KubevirtVolumeSnapshotClassMapping { - if in == nil { - return nil - } - out := new(KubevirtVolumeSnapshotClassMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerPublishingStrategy) DeepCopyInto(out *LoadBalancerPublishingStrategy) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerPublishingStrategy. -func (in *LoadBalancerPublishingStrategy) DeepCopy() *LoadBalancerPublishingStrategy { - if in == nil { - return nil - } - out := new(LoadBalancerPublishingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) { - *out = *in - in.CIDR.DeepCopyInto(&out.CIDR) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry. -func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { - if in == nil { - return nil - } - out := new(MachineNetworkEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ManagedEtcdSpec) DeepCopyInto(out *ManagedEtcdSpec) { - *out = *in - in.Storage.DeepCopyInto(&out.Storage) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdSpec. -func (in *ManagedEtcdSpec) DeepCopy() *ManagedEtcdSpec { - if in == nil { - return nil - } - out := new(ManagedEtcdSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedEtcdStorageSpec) DeepCopyInto(out *ManagedEtcdStorageSpec) { - *out = *in - if in.PersistentVolume != nil { - in, out := &in.PersistentVolume, &out.PersistentVolume - *out = new(PersistentVolumeEtcdStorageSpec) - (*in).DeepCopyInto(*out) - } - if in.RestoreSnapshotURL != nil { - in, out := &in.RestoreSnapshotURL, &out.RestoreSnapshotURL - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedEtcdStorageSpec. -func (in *ManagedEtcdStorageSpec) DeepCopy() *ManagedEtcdStorageSpec { - if in == nil { - return nil - } - out := new(ManagedEtcdStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePool) DeepCopyInto(out *NodePool) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePool. -func (in *NodePool) DeepCopy() *NodePool { - if in == nil { - return nil - } - out := new(NodePool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodePool) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolAutoScaling) DeepCopyInto(out *NodePoolAutoScaling) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolAutoScaling. -func (in *NodePoolAutoScaling) DeepCopy() *NodePoolAutoScaling { - if in == nil { - return nil - } - out := new(NodePoolAutoScaling) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolCondition) DeepCopyInto(out *NodePoolCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolCondition. -func (in *NodePoolCondition) DeepCopy() *NodePoolCondition { - if in == nil { - return nil - } - out := new(NodePoolCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodePoolList) DeepCopyInto(out *NodePoolList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NodePool, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolList. -func (in *NodePoolList) DeepCopy() *NodePoolList { - if in == nil { - return nil - } - out := new(NodePoolList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodePoolList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolManagement) DeepCopyInto(out *NodePoolManagement) { - *out = *in - if in.Replace != nil { - in, out := &in.Replace, &out.Replace - *out = new(ReplaceUpgrade) - (*in).DeepCopyInto(*out) - } - if in.InPlace != nil { - in, out := &in.InPlace, &out.InPlace - *out = new(InPlaceUpgrade) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolManagement. -func (in *NodePoolManagement) DeepCopy() *NodePoolManagement { - if in == nil { - return nil - } - out := new(NodePoolManagement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolPlatform) DeepCopyInto(out *NodePoolPlatform) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSNodePoolPlatform) - (*in).DeepCopyInto(*out) - } - if in.IBMCloud != nil { - in, out := &in.IBMCloud, &out.IBMCloud - *out = new(IBMCloudPlatformSpec) - **out = **in - } - if in.Kubevirt != nil { - in, out := &in.Kubevirt, &out.Kubevirt - *out = new(KubevirtNodePoolPlatform) - (*in).DeepCopyInto(*out) - } - if in.Agent != nil { - in, out := &in.Agent, &out.Agent - *out = new(AgentNodePoolPlatform) - (*in).DeepCopyInto(*out) - } - if in.Azure != nil { - in, out := &in.Azure, &out.Azure - *out = new(AzureNodePoolPlatform) - (*in).DeepCopyInto(*out) - } - if in.PowerVS != nil { - in, out := &in.PowerVS, &out.PowerVS - *out = new(PowerVSNodePoolPlatform) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatform. -func (in *NodePoolPlatform) DeepCopy() *NodePoolPlatform { - if in == nil { - return nil - } - out := new(NodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolPlatformStatus) DeepCopyInto(out *NodePoolPlatformStatus) { - *out = *in - if in.KubeVirt != nil { - in, out := &in.KubeVirt, &out.KubeVirt - *out = new(KubeVirtNodePoolStatus) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolPlatformStatus. -func (in *NodePoolPlatformStatus) DeepCopy() *NodePoolPlatformStatus { - if in == nil { - return nil - } - out := new(NodePoolPlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) { - *out = *in - out.Release = in.Release - in.Platform.DeepCopyInto(&out.Platform) - if in.NodeCount != nil { - in, out := &in.NodeCount, &out.NodeCount - *out = new(int32) - **out = **in - } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Management.DeepCopyInto(&out.Management) - if in.AutoScaling != nil { - in, out := &in.AutoScaling, &out.AutoScaling - *out = new(NodePoolAutoScaling) - **out = **in - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.NodeDrainTimeout != nil { - in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout - *out = new(v1.Duration) - **out = **in - } - if in.NodeVolumeDetachTimeout != nil { - in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout - *out = new(v1.Duration) - **out = **in - } - if in.NodeLabels != nil { - in, out := &in.NodeLabels, &out.NodeLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]Taint, len(*in)) - copy(*out, *in) - } - if in.PausedUntil != nil { - in, out := &in.PausedUntil, &out.PausedUntil - *out = new(string) - **out = **in - } - if in.TuningConfig != nil { - in, out := &in.TuningConfig, &out.TuningConfig - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSpec. -func (in *NodePoolSpec) DeepCopy() *NodePoolSpec { - if in == nil { - return nil - } - out := new(NodePoolSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) { - *out = *in - if in.Platform != nil { - in, out := &in.Platform, &out.Platform - *out = new(NodePoolPlatformStatus) - (*in).DeepCopyInto(*out) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodePoolCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolStatus. -func (in *NodePoolStatus) DeepCopy() *NodePoolStatus { - if in == nil { - return nil - } - out := new(NodePoolStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodePortPublishingStrategy) DeepCopyInto(out *NodePortPublishingStrategy) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortPublishingStrategy. -func (in *NodePortPublishingStrategy) DeepCopy() *NodePortPublishingStrategy { - if in == nil { - return nil - } - out := new(NodePortPublishingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeEtcdStorageSpec) DeepCopyInto(out *PersistentVolumeEtcdStorageSpec) { - *out = *in - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in - } - if in.Size != nil { - in, out := &in.Size, &out.Size - x := (*in).DeepCopy() - *out = &x - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeEtcdStorageSpec. -func (in *PersistentVolumeEtcdStorageSpec) DeepCopy() *PersistentVolumeEtcdStorageSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeEtcdStorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSPlatformSpec) - (*in).DeepCopyInto(*out) - } - if in.Agent != nil { - in, out := &in.Agent, &out.Agent - *out = new(AgentPlatformSpec) - **out = **in - } - if in.IBMCloud != nil { - in, out := &in.IBMCloud, &out.IBMCloud - *out = new(IBMCloudPlatformSpec) - **out = **in - } - if in.Azure != nil { - in, out := &in.Azure, &out.Azure - *out = new(AzurePlatformSpec) - **out = **in - } - if in.PowerVS != nil { - in, out := &in.PowerVS, &out.PowerVS - *out = new(PowerVSPlatformSpec) - (*in).DeepCopyInto(*out) - } - if in.Kubevirt != nil { - in, out := &in.Kubevirt, &out.Kubevirt - *out = new(KubevirtPlatformSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. -func (in *PlatformSpec) DeepCopy() *PlatformSpec { - if in == nil { - return nil - } - out := new(PlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSPlatformStatus) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. -func (in *PlatformStatus) DeepCopy() *PlatformStatus { - if in == nil { - return nil - } - out := new(PlatformStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PowerVSNodePoolPlatform) DeepCopyInto(out *PowerVSNodePoolPlatform) { - *out = *in - out.Processors = in.Processors - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(PowerVSResourceReference) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSNodePoolPlatform. -func (in *PowerVSNodePoolPlatform) DeepCopy() *PowerVSNodePoolPlatform { - if in == nil { - return nil - } - out := new(PowerVSNodePoolPlatform) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { - *out = *in - if in.Subnet != nil { - in, out := &in.Subnet, &out.Subnet - *out = new(PowerVSResourceReference) - (*in).DeepCopyInto(*out) - } - if in.VPC != nil { - in, out := &in.VPC, &out.VPC - *out = new(PowerVSVPC) - **out = **in - } - out.KubeCloudControllerCreds = in.KubeCloudControllerCreds - out.NodePoolManagementCreds = in.NodePoolManagementCreds - out.IngressOperatorCloudCreds = in.IngressOperatorCloudCreds - out.StorageOperatorCloudCreds = in.StorageOperatorCloudCreds - out.ImageRegistryOperatorCloudCreds = in.ImageRegistryOperatorCloudCreds -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. -func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { - if in == nil { - return nil - } - out := new(PowerVSPlatformSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PowerVSResourceReference) DeepCopyInto(out *PowerVSResourceReference) { - *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResourceReference. -func (in *PowerVSResourceReference) DeepCopy() *PowerVSResourceReference { - if in == nil { - return nil - } - out := new(PowerVSResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PowerVSVPC) DeepCopyInto(out *PowerVSVPC) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSVPC. -func (in *PowerVSVPC) DeepCopy() *PowerVSVPC { - if in == nil { - return nil - } - out := new(PowerVSVPC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Release) DeepCopyInto(out *Release) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. -func (in *Release) DeepCopy() *Release { - if in == nil { - return nil - } - out := new(Release) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplaceUpgrade) DeepCopyInto(out *ReplaceUpgrade) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdate) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplaceUpgrade. -func (in *ReplaceUpgrade) DeepCopy() *ReplaceUpgrade { - if in == nil { - return nil - } - out := new(ReplaceUpgrade) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate. -func (in *RollingUpdate) DeepCopy() *RollingUpdate { - if in == nil { - return nil - } - out := new(RollingUpdate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoutePublishingStrategy) DeepCopyInto(out *RoutePublishingStrategy) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePublishingStrategy. -func (in *RoutePublishingStrategy) DeepCopy() *RoutePublishingStrategy { - if in == nil { - return nil - } - out := new(RoutePublishingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretEncryptionSpec) DeepCopyInto(out *SecretEncryptionSpec) { - *out = *in - if in.KMS != nil { - in, out := &in.KMS, &out.KMS - *out = new(KMSSpec) - (*in).DeepCopyInto(*out) - } - if in.AESCBC != nil { - in, out := &in.AESCBC, &out.AESCBC - *out = new(AESCBCSpec) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEncryptionSpec. -func (in *SecretEncryptionSpec) DeepCopy() *SecretEncryptionSpec { - if in == nil { - return nil - } - out := new(SecretEncryptionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceNetworkEntry) DeepCopyInto(out *ServiceNetworkEntry) { - *out = *in - in.CIDR.DeepCopyInto(&out.CIDR) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceNetworkEntry. -func (in *ServiceNetworkEntry) DeepCopy() *ServiceNetworkEntry { - if in == nil { - return nil - } - out := new(ServiceNetworkEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePublishingStrategy) DeepCopyInto(out *ServicePublishingStrategy) { - *out = *in - if in.NodePort != nil { - in, out := &in.NodePort, &out.NodePort - *out = new(NodePortPublishingStrategy) - **out = **in - } - if in.LoadBalancer != nil { - in, out := &in.LoadBalancer, &out.LoadBalancer - *out = new(LoadBalancerPublishingStrategy) - **out = **in - } - if in.Route != nil { - in, out := &in.Route, &out.Route - *out = new(RoutePublishingStrategy) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategy. -func (in *ServicePublishingStrategy) DeepCopy() *ServicePublishingStrategy { - if in == nil { - return nil - } - out := new(ServicePublishingStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServicePublishingStrategyMapping) DeepCopyInto(out *ServicePublishingStrategyMapping) { - *out = *in - in.ServicePublishingStrategy.DeepCopyInto(&out.ServicePublishingStrategy) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePublishingStrategyMapping. -func (in *ServicePublishingStrategyMapping) DeepCopy() *ServicePublishingStrategyMapping { - if in == nil { - return nil - } - out := new(ServicePublishingStrategyMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UnmanagedEtcdSpec) DeepCopyInto(out *UnmanagedEtcdSpec) { - *out = *in - out.TLS = in.TLS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedEtcdSpec. -func (in *UnmanagedEtcdSpec) DeepCopy() *UnmanagedEtcdSpec { - if in == nil { - return nil - } - out := new(UnmanagedEtcdSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - if in.Encrypted != nil { - in, out := &in.Encrypted, &out.Encrypted - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go new file mode 100644 index 000000000..ccbd1a64c --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/agent.go @@ -0,0 +1,18 @@ +package v1beta1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// AgentNodePoolPlatform specifies the configuration of a NodePool when operating +// on the Agent platform. +type AgentNodePoolPlatform struct { + // AgentLabelSelector contains labels that must be set on an Agent in order to + // be selected for a Machine. + // +optional + AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"` +} + +// AgentPlatformSpec specifies configuration for agent-based installations. +type AgentPlatformSpec struct { + // AgentNamespace is the namespace where to search for Agents for this cluster + AgentNamespace string `json:"agentNamespace"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go new file mode 100644 index 000000000..5b9b74b2a --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/aws.go @@ -0,0 +1,864 @@ +package v1beta1 + +// AWSNodePoolPlatform specifies the configuration of a NodePool when operating +// on AWS. +type AWSNodePoolPlatform struct { + // InstanceType is an ec2 instance type for node instances (e.g. m5.large). 
+ InstanceType string `json:"instanceType"` + + // InstanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses. + InstanceProfile string `json:"instanceProfile,omitempty"` + + // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id.startsWith('subnet-') ? !has(self.filters) : size(self.filters) > 0", message="subnet is invalid, a valid subnet id or filters must be set, but not both" + // +kubebuilder:validation:Required + // + // Subnet is the subnet to use for node instances. + Subnet AWSResourceReference `json:"subnet"` + + // AMI is the image id to use for node instances. If unspecified, the default + // is chosen based on the NodePool release payload image. + // + // +optional + AMI string `json:"ami,omitempty"` + + // SecurityGroups is an optional set of security groups to associate with node + // instances. + // + // +optional + SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` + + // RootVolume specifies configuration for the root volume of node instances. + // + // +optional + RootVolume *Volume `json:"rootVolume,omitempty"` + + // ResourceTags is an optional list of additional tags to apply to AWS node + // instances. + // + // These will be merged with HostedCluster scoped tags, and HostedCluster tags + // take precedence in case of conflicts. + // + // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for + // information on tagging AWS resources. AWS supports a maximum of 50 tags per + // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available + // for the user. + // + // +kubebuilder:validation:MaxItems=25 + // +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` + + // placement specifies the placement options for the EC2 instances. + // + // +optional + Placement *PlacementOptions `json:"placement,omitempty"` +} + +// PlacementOptions specifies the placement options for the EC2 instances. +type PlacementOptions struct { + // Tenancy indicates if instance should run on shared or single-tenant hardware. + // + // Possible values: + // default: NodePool instances run on shared hardware. + // dedicated: Each NodePool instance runs on single-tenant hardware. + // host: NodePool instances run on user's pre-allocated dedicated hosts. + // + // +optional + // +kubebuilder:validation:Enum:=default;dedicated;host + Tenancy string `json:"tenancy,omitempty"` +} + +// AWSResourceReference is a reference to a specific AWS resource by ID or filters. +// Only one of ID or Filters may be specified. Specifying more than one will result in +// a validation error. +type AWSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // Filters is a set of key/value pairs used to identify a resource + // They are applied according to the rules defined by the AWS API: + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html + // +optional + Filters []Filter `json:"filters,omitempty"` +} + +// Filter is a filter used to identify an AWS resource +type Filter struct { + // Name of the filter. Filter names are case-sensitive. + Name string `json:"name"` + + // Values includes one or more filter values. Filter values are case-sensitive. + Values []string `json:"values"` +} + +// Volume specifies the configuration options for node instance storage devices. +type Volume struct { + // Size specifies size (in Gi) of the storage device. 
+ // + // Must be greater than the image snapshot size or 8 (whichever is greater). + // + // +kubebuilder:validation:Minimum=8 + Size int64 `json:"size"` + + // Type is the type of the volume. + Type string `json:"type"` + + // IOPS is the number of IOPS requested for the disk. This is only valid + // for type io1. + // + // +optional + IOPS int64 `json:"iops,omitempty"` + + // Encrypted is whether the volume should be encrypted or not. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Encrypted is immutable" + Encrypted *bool `json:"encrypted,omitempty"` + + // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + // If Encrypted is set and this is omitted, the default AWS key will be used. + // The key must already exist and be accessible by the controller. + // +optional + EncryptionKey string `json:"encryptionKey,omitempty"` +} + +// AWSCloudProviderConfig specifies AWS networking configuration. +type AWSCloudProviderConfig struct { + // Subnet is the subnet to use for control plane cloud resources. + // + // +optional + Subnet *AWSResourceReference `json:"subnet,omitempty"` + + // Zone is the availability zone where control plane cloud resources are + // created. + // + // +optional + Zone string `json:"zone,omitempty"` + + // VPC is the VPC to use for control plane cloud resources. + VPC string `json:"vpc"` +} + +// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. +type AWSEndpointAccessType string + +const ( + // Public endpoint access allows public API server access and public node + // communication with the control plane. + Public AWSEndpointAccessType = "Public" + + // PublicAndPrivate endpoint access allows public API server access and + // private node communication with the control plane. + PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" + + // Private endpoint access allows only private API server access and private + // node communication with the control plane. + Private AWSEndpointAccessType = "Private" +) + +// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. +type AWSPlatformSpec struct { + // Region is the AWS region in which the cluster resides. This configures the + // OCP control plane cloud integrations, and is used by NodePool to resolve + // the correct boot AMI for a given release. + // + // +immutable + Region string `json:"region"` + + // CloudProviderConfig specifies AWS networking configuration for the control + // plane. + // This is mainly used for cloud provider controller config: + // https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364 + // TODO(dan): should this be named AWSNetworkConfig? + // + // +optional + // +immutable + CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"` + + // ServiceEndpoints specifies optional custom endpoints which will override + // the default service endpoint of specific AWS Services. + // + // There must be only one ServiceEndpoint for a given service name. + // + // +optional + // +immutable + ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // RolesRef contains references to various AWS IAM roles required to enable + // integrations such as OIDC. + // + // +immutable + RolesRef AWSRolesRef `json:"rolesRef"` + + // ResourceTags is a list of additional tags to apply to AWS resources created + // for the cluster. 
See + // https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for + // information on tagging AWS resources. AWS supports a maximum of 50 tags per + // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available + // for the user. + // + // +kubebuilder:validation:MaxItems=25 + // +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` + + // EndpointAccess specifies the publishing scope of cluster endpoints. The + // default is Public. + // + // +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private + // +kubebuilder:default=Public + // +optional + EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"` + + // AdditionalAllowedPrincipals specifies a list of additional allowed principal ARNs + // to be added to the hosted control plane's VPC Endpoint Service to enable additional + // VPC Endpoint connection requests to be automatically accepted. + // See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html + // for more details around VPC Endpoint Service allowed principals. + // + // +optional + AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` + + // MultiArch specifies whether the Hosted Cluster will be expected to support NodePools with different + // CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster. + // Deprecated: This field is no longer used. The HyperShift Operator now performs multi-arch validations + // automatically despite the platform type. The HyperShift Operator will set HostedCluster.Status.PayloadArch based + // on the HostedCluster release image. This field is used by the NodePool controller to validate the + // NodePool.Spec.Arch is supported. + // +kubebuilder:default=false + // +optional + MultiArch bool `json:"multiArch"` + + // SharedVPC contains fields that must be specified if the HostedCluster must use a VPC that is + // created in a different AWS account and is shared with the AWS account where the HostedCluster + // will be created. + // + // +optional + SharedVPC *AWSSharedVPC `json:"sharedVPC,omitempty"` +} + +// AWSSharedVPC contains fields needed to create a HostedCluster using a VPC that has been +// created and shared from a different AWS account than the AWS account where the cluster +// is getting created. +type AWSSharedVPC struct { + + // RolesRef contains references to roles in the VPC owner account that enable a + // HostedCluster on a shared VPC. + // + // +kubebuilder:validation:Required + // +required + RolesRef AWSSharedVPCRolesRef `json:"rolesRef"` + + // LocalZoneID is the ID of the route53 hosted zone for [cluster-name].hypershift.local that is + // associated with the HostedCluster's VPC and exists in the VPC owner account. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=32 + // +required + LocalZoneID string `json:"localZoneID"` +} + +type AWSRoleCredentials struct { + ARN string `json:"arn"` + Namespace string `json:"namespace"` + Name string `json:"name"` +} + +// AWSResourceTag is a tag to apply to AWS resources created for the cluster. +type AWSResourceTag struct { + // Key is the key of the tag. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + Key string `json:"key"` + // Value is the value of the tag. + // + // Some AWS service do not support empty values. 
Since tags are added to + // resources in many services, the length of the tag value must meet the + // requirements of all services. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + Value string `json:"value"` +} + +// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. +type AWSRolesRef struct { + // The referenced role must have a trust relationship that allows it to be assumed via web identity. + // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. + // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Principal": { + // "Federated": "{{ .ProviderARN }}" + // }, + // "Action": "sts:AssumeRoleWithWebIdentity", + // "Condition": { + // "StringEquals": { + // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} + // } + // } + // } + // ] + // } + // + // IngressARN is an ARN value referencing a role appropriate for the Ingress Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "elasticloadbalancing:DescribeLoadBalancers", + // "tag:GetResources", + // "route53:ListHostedZones" + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ChangeResourceRecordSets" + // ], + // "Resource": [ + // "arn:aws:route53:::PUBLIC_ZONE_ID", + // "arn:aws:route53:::PRIVATE_ZONE_ID" + // ] + // } + // ] + // } + IngressARN string `json:"ingressARN"` + + // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "s3:CreateBucket", + // "s3:DeleteBucket", + // "s3:PutBucketTagging", + // "s3:GetBucketTagging", + // "s3:PutBucketPublicAccessBlock", + // "s3:GetBucketPublicAccessBlock", + // "s3:PutEncryptionConfiguration", + // "s3:GetEncryptionConfiguration", + // "s3:PutLifecycleConfiguration", + // "s3:GetLifecycleConfiguration", + // "s3:GetBucketLocation", + // "s3:ListBucket", + // "s3:GetObject", + // "s3:PutObject", + // "s3:DeleteObject", + // "s3:ListBucketMultipartUploads", + // "s3:AbortMultipartUpload", + // "s3:ListMultipartUploadParts" + // ], + // "Resource": "*" + // } + // ] + // } + ImageRegistryARN string `json:"imageRegistryARN"` + + // StorageARN is an ARN value referencing a role appropriate for the Storage Operator. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:AttachVolume", + // "ec2:CreateSnapshot", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:DeleteSnapshot", + // "ec2:DeleteTags", + // "ec2:DeleteVolume", + // "ec2:DescribeInstances", + // "ec2:DescribeSnapshots", + // "ec2:DescribeTags", + // "ec2:DescribeVolumes", + // "ec2:DescribeVolumesModifications", + // "ec2:DetachVolume", + // "ec2:ModifyVolume" + // ], + // "Resource": "*" + // } + // ] + // } + StorageARN string `json:"storageARN"` + + // NetworkARN is an ARN value referencing a role appropriate for the Network Operator. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:DescribeInstances", + // "ec2:DescribeInstanceStatus", + // "ec2:DescribeInstanceTypes", + // "ec2:UnassignPrivateIpAddresses", + // "ec2:AssignPrivateIpAddresses", + // "ec2:UnassignIpv6Addresses", + // "ec2:AssignIpv6Addresses", + // "ec2:DescribeSubnets", + // "ec2:DescribeNetworkInterfaces" + // ], + // "Resource": "*" + // } + // ] + // } + NetworkARN string `json:"networkARN"` + + // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. + // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "autoscaling:DescribeAutoScalingGroups", + // "autoscaling:DescribeLaunchConfigurations", + // "autoscaling:DescribeTags", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeInstances", + // "ec2:DescribeImages", + // "ec2:DescribeRegions", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVolumes", + // "ec2:CreateSecurityGroup", + // "ec2:CreateTags", + // "ec2:CreateVolume", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyVolume", + // "ec2:AttachVolume", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateRoute", + // "ec2:DeleteRoute", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteVolume", + // "ec2:DetachVolume", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:DescribeVpcs", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:AttachLoadBalancerToSubnets", + // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancer", + // "elasticloadbalancing:CreateLoadBalancerPolicy", + // "elasticloadbalancing:CreateLoadBalancerListeners", + // "elasticloadbalancing:ConfigureHealthCheck", + // "elasticloadbalancing:DeleteLoadBalancer", + // "elasticloadbalancing:DeleteLoadBalancerListeners", + // "elasticloadbalancing:DescribeLoadBalancers", + // "elasticloadbalancing:DescribeLoadBalancerAttributes", + // "elasticloadbalancing:DetachLoadBalancerFromSubnets", + // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + // "elasticloadbalancing:ModifyLoadBalancerAttributes", + // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + // "elasticloadbalancing:AddTags", + // "elasticloadbalancing:CreateListener", + // "elasticloadbalancing:CreateTargetGroup", + // "elasticloadbalancing:DeleteListener", + // "elasticloadbalancing:DeleteTargetGroup", + // "elasticloadbalancing:DeregisterTargets", + // "elasticloadbalancing:DescribeListeners", + // "elasticloadbalancing:DescribeLoadBalancerPolicies", + // "elasticloadbalancing:DescribeTargetGroups", + // "elasticloadbalancing:DescribeTargetHealth", + // "elasticloadbalancing:ModifyListener", + // "elasticloadbalancing:ModifyTargetGroup", + // "elasticloadbalancing:RegisterTargets", + // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + // "iam:CreateServiceLinkedRole", + // "kms:DescribeKey" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // } + // ] + // } + // +immutable + KubeCloudControllerARN string `json:"kubeCloudControllerARN"` + + // NodePoolManagementARN is an ARN value referencing a role appropriate for the 
CAPI Controller. + // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Action": [ + // "ec2:AssociateRouteTable", + // "ec2:AttachInternetGateway", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:CreateInternetGateway", + // "ec2:CreateNatGateway", + // "ec2:CreateRoute", + // "ec2:CreateRouteTable", + // "ec2:CreateSecurityGroup", + // "ec2:CreateSubnet", + // "ec2:CreateTags", + // "ec2:DeleteInternetGateway", + // "ec2:DeleteNatGateway", + // "ec2:DeleteRouteTable", + // "ec2:DeleteSecurityGroup", + // "ec2:DeleteSubnet", + // "ec2:DeleteTags", + // "ec2:DescribeAccountAttributes", + // "ec2:DescribeAddresses", + // "ec2:DescribeAvailabilityZones", + // "ec2:DescribeImages", + // "ec2:DescribeInstances", + // "ec2:DescribeInternetGateways", + // "ec2:DescribeNatGateways", + // "ec2:DescribeNetworkInterfaces", + // "ec2:DescribeNetworkInterfaceAttribute", + // "ec2:DescribeRouteTables", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeSubnets", + // "ec2:DescribeVpcs", + // "ec2:DescribeVpcAttribute", + // "ec2:DescribeVolumes", + // "ec2:DetachInternetGateway", + // "ec2:DisassociateRouteTable", + // "ec2:DisassociateAddress", + // "ec2:ModifyInstanceAttribute", + // "ec2:ModifyNetworkInterfaceAttribute", + // "ec2:ModifySubnetAttribute", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RunInstances", + // "ec2:TerminateInstances", + // "tag:GetResources", + // "ec2:CreateLaunchTemplate", + // "ec2:CreateLaunchTemplateVersion", + // "ec2:DescribeLaunchTemplates", + // "ec2:DescribeLaunchTemplateVersions", + // "ec2:DeleteLaunchTemplate", + // "ec2:DeleteLaunchTemplateVersions" + // ], + // "Resource": [ + // "*" + // ], + // "Effect": "Allow" + // }, + // { + // "Condition": { + // "StringLike": { + // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + // } + // }, + // "Action": [ + // "iam:CreateServiceLinkedRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" + // ], + // "Effect": "Allow" + // }, + // { + // "Action": [ + // "iam:PassRole" + // ], + // "Resource": [ + // "arn:*:iam::*:role/*-worker-role" + // ], + // "Effect": "Allow" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:Decrypt", + // "kms:ReEncrypt", + // "kms:GenerateDataKeyWithoutPlainText", + // "kms:DescribeKey" + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "kms:CreateGrant" + // ], + // "Resource": "*", + // "Condition": { + // "Bool": { + // "kms:GrantIsForAWSResource": true + // } + // } + // } + // ] + // } + // + // +immutable + NodePoolManagementARN string `json:"nodePoolManagementARN"` + + // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:CreateVpcEndpoint", + // "ec2:DescribeVpcEndpoints", + // "ec2:ModifyVpcEndpoint", + // "ec2:DeleteVpcEndpoints", + // "ec2:CreateTags", + // "route53:ListHostedZones", + // "ec2:CreateSecurityGroup", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:AuthorizeSecurityGroupEgress", + // "ec2:DeleteSecurityGroup", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RevokeSecurityGroupEgress", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeVpcs", + // ], + // "Resource": "*" + // }, + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "arn:aws:route53:::%s" + // } + // ] + // } + // +immutable + ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"` +} + +// AWSSharedVPCRolesRef contains references to AWS IAM roles required for a shared VPC hosted cluster. +// These roles must exist in the VPC owner's account. +type AWSSharedVPCRolesRef struct { + // IngressARN is an ARN value referencing the role in the VPC owner account that allows the + // ingress operator in the cluster account to create and manage records in the private DNS + // hosted zone. + // + // The referenced role must have a trust relationship that allows it to be assumed by the + // ingress operator role in the VPC creator account. + // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Sid": "Statement1", + // "Effect": "Allow", + // "Principal": { + // "AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-openshift-ingress" + // }, + // "Action": "sts:AssumeRole" + // } + // ] + // } + // + // The following is an example of the policy document for this role. + // (Based on https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa-shared-vpc-config.html#rosa-sharing-vpc-dns-and-roles_rosa-shared-vpc-config) + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "route53:ListHostedZones", + // "route53:ListHostedZonesByName", + // "route53:ChangeTagsForResource", + // "route53:GetAccountLimit", + // "route53:GetChange", + // "route53:GetHostedZone", + // "route53:ListTagsForResource", + // "route53:UpdateHostedZoneComment", + // "tag:GetResources", + // "tag:UntagResources" + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "*" + // }, + // ] + // } + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +required + IngressARN string `json:"ingressARN"` + + // ControlPlaneARN is an ARN value referencing the role in the VPC owner account that allows + // the control plane operator in the cluster account to create and manage a VPC endpoint, its + // corresponding Security Group, and DNS records in the hypershift local hosted zone. + // + // The referenced role must have a trust relationship that allows it to be assumed by the + // control plane operator role in the VPC creator account. 
+ // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Sid": "Statement1", + // "Effect": "Allow", + // "Principal": { + // "AWS": "arn:aws:iam::[cluster-creator-account-id]:role/[infra-id]-control-plane-operator" + // }, + // "Action": "sts:AssumeRole" + // } + // ] + // } + // + // The following is an example of the policy document for this role. + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "ec2:CreateVpcEndpoint", + // "ec2:DescribeVpcEndpoints", + // "ec2:ModifyVpcEndpoint", + // "ec2:DeleteVpcEndpoints", + // "ec2:CreateTags", + // "route53:ListHostedZones", + // "ec2:CreateSecurityGroup", + // "ec2:AuthorizeSecurityGroupIngress", + // "ec2:AuthorizeSecurityGroupEgress", + // "ec2:DeleteSecurityGroup", + // "ec2:RevokeSecurityGroupIngress", + // "ec2:RevokeSecurityGroupEgress", + // "ec2:DescribeSecurityGroups", + // "ec2:DescribeVpcs", + // "route53:ChangeResourceRecordSets", + // "route53:ListResourceRecordSets" + // ], + // "Resource": "*" + // } + // ] + // } + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +required + ControlPlaneARN string `json:"controlPlaneARN"` +} + +// AWSServiceEndpoint stores the configuration for services to +// override existing defaults of AWS Services. +type AWSServiceEndpoint struct { + // Name is the name of the AWS service. + // This must be provided and cannot be empty. + Name string `json:"name"` + + // URL is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider +type AWSKMSSpec struct { + // Region contains the AWS region + Region string `json:"region"` + // ActiveKey defines the active key used to encrypt new secrets + ActiveKey AWSKMSKeyEntry `json:"activeKey"` + // BackupKey defines the old key during the rotation process so previously created + // secrets can continue to be decrypted until they are all re-encrypted with the active key. + // +optional + BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"` + // Auth defines metadata about the management of credentials used to interact with AWS KMS + Auth AWSKMSAuthSpec `json:"auth"` +} + +// AWSKMSAuthSpec defines metadata about the management of credentials used to interact and encrypt data via AWS KMS key. +type AWSKMSAuthSpec struct { + // The referenced role must have a trust relationship that allows it to be assumed via web identity. + // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. + // Example: + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Principal": { + // "Federated": "{{ .ProviderARN }}" + // }, + // "Action": "sts:AssumeRoleWithWebIdentity", + // "Condition": { + // "StringEquals": { + // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} + // } + // } + // } + // ] + // } + // + // AWSKMSARN is an ARN value referencing a role appropriate for managing the auth via the AWS KMS key. 
+ // + // The following is an example of a valid policy document: + // + // { + // "Version": "2012-10-17", + // "Statement": [ + // { + // "Effect": "Allow", + // "Action": [ + // "kms:Encrypt", + // "kms:Decrypt", + // "kms:ReEncrypt*", + // "kms:GenerateDataKey*", + // "kms:DescribeKey" + // ], + // "Resource": %q + // } + // ] + // } + AWSKMSRoleARN string `json:"awsKms"` +} + +// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS +type AWSKMSKeyEntry struct { + // ARN is the Amazon Resource Name for the encryption key + // +kubebuilder:validation:Pattern=`^arn:` + ARN string `json:"arn"` +} + +// AWSPlatformStatus contains status specific to the AWS platform +type AWSPlatformStatus struct { + // DefaultWorkerSecurityGroupID is the ID of a security group created by + // the control plane operator. It is always added to worker machines in + // addition to any security groups specified in the NodePool. + // +optional + DefaultWorkerSecurityGroupID string `json:"defaultWorkerSecurityGroupID,omitempty"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go new file mode 100644 index 000000000..5a65221a2 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/azure.go @@ -0,0 +1,571 @@ +package v1beta1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +// AzureVMImageType is used to specify the source of the Azure VM boot image. +// Valid values are ImageID and AzureMarketplace. +// +kubebuilder:validation:Enum:=ImageID;AzureMarketplace +type AzureVMImageType string + +const ( + // ImageID is the used to specify that an Azure resource ID of a VHD image is used to boot the Azure VMs from. + ImageID AzureVMImageType = "ImageID" + + // AzureMarketplace is used to specify the Azure Marketplace image info to use to boot the Azure VMs from. + AzureMarketplace AzureVMImageType = "AzureMarketplace" +) + +// AzureNodePoolPlatform is the platform specific configuration for an Azure node pool. +type AzureNodePoolPlatform struct { + // vmSize is the Azure VM instance type to use for the nodes being created in the nodepool. + // The size naming convention is documented here https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. + // Size names should start with a Family name, which is represented by one of more capital letters, and then be followed by the CPU count. + // This is followed by 0 or more additional features, represented by a, b, d, i, l, m, p, t, s, C, and NP, refer to the Azure documentation for an explanation of these features. + // Optionally an accelerator such as a GPU can be added, prefixed by an underscore, for example A100, H100 or MI300X. + // The size may also be versioned, in which case it should be suffixed with _v where the version is a number. + // For example, "D32ads_v5" would be a suitable general purpose VM size, or "ND96_MI300X_v5" would represent a GPU accelerated VM. + // + // +kubebuilder:validation:Pattern=`^(Standard_|Basic_)?[A-Z]+[0-9]+(-[0-9]+)?[abdilmptsCNP]*(_[A-Z]*[0-9]+[A-Z]*)?(_v[0-9]+)?$` + // +kubebuilder:validation:Required + // + Azure VM size format described in https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions + // + "[A-Z]+[0-9]+(-[0-9]+)?" - Series, size and constrained CPU size + // + "[abdilmptsCNP]*" - Additive features + // + "(_[A-Z]*[0-9]+[A-Z]*)?" 
- Optional accelerator types + VMSize string `json:"vmSize"` + + // image is used to configure the VM boot image. If unset, the default image at the location below will be used and + // is expected to exist: subscription//resourceGroups//providers/Microsoft.Compute/images/rhcos.x86_64.vhd. + // The and the are expected to be the same resource group documented in the + // Hosted Cluster specification respectively, HostedCluster.Spec.Platform.Azure.SubscriptionID and + // HostedCluster.Spec.Platform.Azure.ResourceGroupName. + // + // +kubebuilder:validation:Required + Image AzureVMImage `json:"image"` + + // osDisk provides configuration for the OS disk for the nodepool. + // This can be used to configure the size, storage account type, encryption options and whether the disk is persistent or ephemeral. + // When not provided, the platform will choose reasonable defaults which are subject to change over time. + // Review the fields within the osDisk for more details. + OSDisk AzureNodePoolOSDisk `json:"osDisk"` + + // availabilityZone is the failure domain identifier where the VM should be attached to. This must not be specified + // for clusters in a location that does not support AvailabilityZone because it would cause a failure from Azure API. + //kubebuilder:validation:XValidation:rule='availabilityZone in ["1", "2", "3"]' + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // encryptionAtHost enables encryption at host on virtual machines. According to Microsoft documentation, this + // means data stored on the VM host is encrypted at rest and flows encrypted to the Storage service. See + // https://learn.microsoft.com/en-us/azure/virtual-machines/disks-enable-host-based-encryption-portal?tabs=azure-powershell + // for more information. + // + // +kubebuilder:default:=Enabled + // +kubebuilder:validation:Enum=Enabled;Disabled + // +optional + EncryptionAtHost string `json:"encryptionAtHost,omitempty"` + + // subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a + // different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must + // exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID, + // HostedCluster.Spec.Platform.Azure.SubscriptionID. + // subnetID is immutable once set. + // The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`. + // The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12. + // The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and paranthesis and must not end with a period (.) character. + // The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods and must not end with either a period (.) or hyphen (-) character. + // The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character. 
+ //
+ // +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="subnetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`"
+ // +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the subnetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+ // +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses"
+ // +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character"
+ // +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods"
+ // +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) or hyphen (-) character"
+ // +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character"
+ // +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character"
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=355
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable"
+ // +kubebuilder:validation:Required
+ SubnetID string `json:"subnetID"`
+
+ // diagnostics specifies the diagnostics settings for a virtual machine.
+ // If not specified, then Boot diagnostics will be disabled.
+ // +optional
+ Diagnostics *Diagnostics `json:"diagnostics,omitempty"`
+
+ // machineIdentityID is a user-assigned identity assigned to the VMs used to authenticate with Azure services. The
+ // identity is expected to exist under the same resource group as HostedCluster.Spec.Platform.Azure.ResourceGroupName. This
+ // user assigned identity is expected to have the Contributor role assigned to it and scoped to the resource group
+ // under HostedCluster.Spec.Platform.Azure.ResourceGroupName.
+ //
+ // If this field is not supplied, the Service Principal credentials will be written to a file on the disk of each VM
+ // in order to be accessible by the cloud provider; the aforementioned credentials provided are the same ones as
+ // HostedCluster.Spec.Platform.Azure.Credentials. However, this is less secure than using a managed identity.
+ //
+ // TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+ //
+ // +optional
+ MachineIdentityID string `json:"machineIdentityID,omitempty"`
+}
+
+// AzureVMImage represents the different types of boot image sources that can be provided for an Azure VM.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ImageID' ? has(self.imageID) : !has(self.imageID)",message="imageID is required when type is ImageID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AzureMarketplace' ? has(self.azureMarketplace) : !has(self.azureMarketplace)",message="azureMarketplace is required when type is AzureMarketplace, and forbidden otherwise"
+// +union
+type AzureVMImage struct {
+ // type is the type of image data that will be provided to the Azure VM.
+ // Valid values are "ImageID" and "AzureMarketplace".
+ // ImageID is used for legacy managed VM images. This is where the user uploads a VM image directly to their resource group.
+ // AzureMarketplace means the VM will boot from an Azure Marketplace image.
+ // Marketplace images are preconfigured and published by the OS vendors and may include preconfigured software for the VM.
+ //
+ // +kubebuilder:validation:Required
+ // +unionDiscriminator
+ Type AzureVMImageType `json:"type"`
+
+ // imageID is the Azure resource ID of a VHD image to use to boot the Azure VMs from.
+ // TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+ //
+ // +optional
+ // +unionMember
+ ImageID *string `json:"imageID,omitempty"`
+
+ // azureMarketplace contains the Azure Marketplace image info to use to boot the Azure VMs from.
+ //
+ // +optional
+ // +unionMember
+ AzureMarketplace *AzureMarketplaceImage `json:"azureMarketplace,omitempty"`
+}
+
+// AzureMarketplaceImage specifies the information needed to create an Azure VM from an Azure Marketplace image.
+// This struct replicates the same fields found in CAPZ - https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/api/v1beta1/types.go.
+type AzureMarketplaceImage struct {
+ // publisher is the name of the organization that created the image.
+ // It must be between 3 and 50 characters in length, and consist of only lowercase letters, numbers, and hyphens (-) and underscores (_).
+ // It must start with a lowercase letter or a number.
+ // TODO: Can we explain where a user might find this value, or provide an example of one they might want to use
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-_]{2,49}$`
+ // +kubebuilder:validation:MinLength=3
+ // +kubebuilder:validation:MaxLength=50
+ // +kubebuilder:validation:Required
+ Publisher string `json:"publisher"`
+
+ // offer specifies the name of a group of related images created by the publisher.
+ // TODO: What is the valid character set for this field? What about minimum and maximum lengths?
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ Offer string `json:"offer"`
+
+ // sku specifies an instance of an offer, such as a major release of a distribution.
+ // For example, 22_04-lts-gen2, 8-lvm-gen2.
+ // The value must consist only of lowercase letters, numbers, and hyphens (-) and underscores (_).
+ // TODO: What about length limits?
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-_]+$`
+ // +kubebuilder:validation:MinLength=1
+ SKU string `json:"sku"`
+
+ // version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major,
+ // Minor, and Build are decimal numbers, e.g. '1.2.0'.
Specify 'latest' to use the latest version of an image available at + // deployment time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a + // new version becomes available. + // + // +kubebuilder:validation:Pattern=`^[0-9]+\.[0-9]+\.[0-9]+$|^latest$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + Version string `json:"version"` +} + +// AzureDiagnosticsStorageAccountType specifies the type of storage account for storing Azure VM diagnostics data. +// +kubebuilder:validation:Enum=Managed;UserManaged;Disabled +type AzureDiagnosticsStorageAccountType string + +func (a *AzureDiagnosticsStorageAccountType) String() string { + return string(*a) +} + +func (a *AzureDiagnosticsStorageAccountType) Set(s string) error { + switch s { + case string(AzureDiagnosticsStorageAccountTypeDisabled), string(AzureDiagnosticsStorageAccountTypeManaged), string(AzureDiagnosticsStorageAccountTypeUserManaged): + *a = AzureDiagnosticsStorageAccountType(s) + return nil + default: + return fmt.Errorf("unknown Azure diagnostics storage account type: %s", s) + } +} + +func (a *AzureDiagnosticsStorageAccountType) Type() string { + return "AzureDiagnosticsStorageAccountType" +} + +const ( + AzureDiagnosticsStorageAccountTypeDisabled = AzureDiagnosticsStorageAccountType("Disabled") + AzureDiagnosticsStorageAccountTypeManaged = AzureDiagnosticsStorageAccountType("Managed") + AzureDiagnosticsStorageAccountTypeUserManaged = AzureDiagnosticsStorageAccountType("UserManaged") +) + +// Diagnostics specifies the diagnostics settings for a virtual machine. +// +kubebuilder:validation:XValidation:rule="self.storageAccountType == 'UserManaged' ? has(self.userManaged) : !has(self.userManaged)", message="userManaged is required when storageAccountType is UserManaged, and forbidden otherwise" +// +union +type Diagnostics struct { + // storageAccountType determines if the storage account for storing the diagnostics data + // should be disabled (Disabled), provisioned by Azure (Managed) or by the user (UserManaged). + // +kubebuilder:validation:Enum=Managed;UserManaged;Disabled + // +kubebuilder:default:=Disabled + // +unionDiscriminator + // +optional + StorageAccountType AzureDiagnosticsStorageAccountType `json:"storageAccountType,omitempty"` + + // userManaged specifies the diagnostics settings for a virtual machine when the storage account is managed by the user. + // +optional + // +unionMember + UserManaged *UserManagedDiagnostics `json:"userManaged,omitempty"` +} + +// UserManagedDiagnostics specifies the diagnostics settings for a virtual machine when the storage account is managed by the user. +type UserManagedDiagnostics struct { + // storageAccountURI is the URI of the user-managed storage account. + // The URI typically will be `https://.blob.core.windows.net/` + // but may differ if you are using Azure DNS zone endpoints. + // You can find the correct endpoint by looking for the Blob Primary Endpoint in the + // endpoints tab in the Azure console or with the CLI by issuing + // `az storage account list --query='[].{name: name, "resource group": resourceGroup, "blob endpoint": primaryEndpoints.blob}'`. 
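+ // As an illustrative sketch only (the storage account name is an assumption, not a default), a typical value would look like:
+ //   https://mydiagnosticsaccount.blob.core.windows.net/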
+ // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'", message="storageAccountURI must be a valid HTTPS URL" + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:Required + StorageAccountURI string `json:"storageAccountURI,omitempty"` +} + +// +kubebuilder:validation:Enum=Premium_LRS;PremiumV2_LRS;Standard_LRS;StandardSSD_LRS;UltraSSD_LRS +type AzureDiskStorageAccountType string + +// Values copied from https://github.com/openshift/cluster-api-provider-azure/blob/release-4.18/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go#L614 +// excluding zone redundant storage(ZRS) types as they are not available in all regions. +const ( + // DiskStorageAccountTypesPremiumLRS - Premium SSD locally redundant storage. Best for production and performance sensitive + // workloads. + DiskStorageAccountTypesPremiumLRS AzureDiskStorageAccountType = "Premium_LRS" + // DiskStorageAccountTypesPremiumV2LRS - Premium SSD v2 locally redundant storage. Best for production and performance-sensitive + // workloads that consistently require low latency and high IOPS and throughput. + DiskStorageAccountTypesPremiumV2LRS AzureDiskStorageAccountType = "PremiumV2_LRS" + // DiskStorageAccountTypesStandardLRS - Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent + // access. + DiskStorageAccountTypesStandardLRS AzureDiskStorageAccountType = "Standard_LRS" + // DiskStorageAccountTypesStandardSSDLRS - Standard SSD locally redundant storage. Best for web servers, lightly used enterprise + // applications and dev/test. + DiskStorageAccountTypesStandardSSDLRS AzureDiskStorageAccountType = "StandardSSD_LRS" + // DiskStorageAccountTypesUltraSSDLRS - Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, + // top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads. + DiskStorageAccountTypesUltraSSDLRS AzureDiskStorageAccountType = "UltraSSD_LRS" +) + +// +kubebuilder:validation:Enum=Persistent;Ephemeral +type AzureDiskPersistence string + +const ( + // PersistentDiskPersistence is the persistent disk type. + PersistentDiskPersistence AzureDiskPersistence = "Persistent" + + // EphemeralDiskPersistence is the ephemeral disk type. + EphemeralDiskPersistence AzureDiskPersistence = "Ephemeral" +) + +// +kubebuilder:validation:XValidation:rule="!has(self.diskStorageAccountType) || self.diskStorageAccountType != 'UltraSSD_LRS' || self.sizeGiB <= 32767",message="When not using diskStorageAccountType UltraSSD_LRS, the SizeGB value must be less than or equal to 32,767" +type AzureNodePoolOSDisk struct { + // SizeGiB is the size in GiB (1024^3 bytes) to assign to the OS disk. + // This should be between 16 and 65,536 when using the UltraSSD_LRS storage account type and between 16 and 32,767 when using any other storage account type. + // When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is 30. + // + // +kubebuilder:validation:Minimum=16 + // +kubebuilder:validation:Maximum=65536 + // +optional + SizeGiB int32 `json:"sizeGiB,omitempty"` + + // diskStorageAccountType is the disk storage account type to use. + // Valid values are Premium_LRS, PremiumV2_LRS, Standard_LRS, StandardSSD_LRS, UltraSSD_LRS. + // Note that Standard means a HDD. 
+ // The disk performance is tied to the disk type, please refer to the Azure documentation for further details
+ // https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison.
+ // When omitted this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default is Premium SSD LRS.
+ //
+ // +optional
+ DiskStorageAccountType AzureDiskStorageAccountType `json:"diskStorageAccountType,omitempty"`
+
+ // encryptionSetID is the ID of the DiskEncryptionSet resource to use to encrypt the OS disks for the VMs.
+ // Configuring a DiskEncryptionSet allows greater control over the encryption of the VM OS disk at rest.
+ // Can be used with either platform (Azure) managed, or customer managed encryption keys.
+ // This needs to exist in the same subscription id listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.SubscriptionID.
+ // DiskEncryptionSetID should also exist in a resource group under the same subscription id and the same location
+ // listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.Location.
+ // The encryptionSetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`.
+ // The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12.
+ // The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses and must not end with a period (.) character.
+ // The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores.
+ // TODO: Are there other encryption related options we may want to expose, should this be in a struct as well?
+ //
+ // +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 9 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Compute/diskEncryptionSets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{resourceName}`"
+ // +kubebuilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12"
+ // +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and parentheses"
+ // +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the encryptionSetID must not end with a period (.) character"
+ // +kubebuilder:validation:XValidation:rule="self.split('/')[8].matches('[a-zA-Z0-9-_]{1,80}')",message="The resourceName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores"
+ // +kubebuilder:validation:MinLength:=1
+ // +kubebuilder:validation:MaxLength:=285
+ // +optional
+ EncryptionSetID string `json:"encryptionSetID,omitempty"`
+
+ // persistence determines whether the OS disk should be persisted beyond the life of the VM.
+ // Valid values are Persistent and Ephemeral.
+ // When set to Ephemeral, the OS disk will not be persisted to Azure storage and implies restrictions to the VM size and caching type.
+ // Full details can be found in the Azure documentation https://learn.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks.
+ // Ephemeral disks are primarily used for stateless applications, provide lower latency than Persistent disks and also incur no storage costs.
+ // When not set, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ //
+ // +optional
+ Persistence AzureDiskPersistence `json:"persistence,omitempty"`
+}
+
+// AzurePlatformSpec specifies configuration for clusters running on Azure. Generally, the HyperShift API assumes bring
+// your own (BYO) cloud infrastructure resources. For example, resources like a resource group, a subnet, or a vnet
+// would be pre-created and then their names would be used respectively in the ResourceGroupName, SubnetName, VnetName
+// fields of the Hosted Cluster CR. An existing cloud resource is expected to exist under the same SubscriptionID.
+type AzurePlatformSpec struct {
+ // Credentials is the object containing existing Azure credentials needed for creating and managing cloud
+ // infrastructure resources.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Credentials corev1.LocalObjectReference `json:"credentials"`
+
+ // Cloud is the cloud environment identifier, valid values could be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33
+ //
+ // +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
+ // +kubebuilder:default="AzurePublicCloud"
+ Cloud string `json:"cloud,omitempty"`
+
+ // Location is the Azure region where all the cloud infrastructure resources will be created.
+ //
+ // Example: eastus
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Location is immutable"
+ // +immutable
+ // +required
+ Location string `json:"location"`
+
+ // ResourceGroupName is the name of an existing resource group where all cloud resources created by the Hosted
+ // Cluster are to be placed. The resource group is expected to exist under the same subscription as SubscriptionID.
+ //
+ // In ARO HCP, this will be the managed resource group where customer cloud resources will be created.
+ //
+ // Resource group naming requirements can be found here: https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.ResourceGroup.Name/.
+ //
+ // Example: if your resource group ID is /subscriptions//resourceGroups/, your
+ // ResourceGroupName is .
+ //
+ // +kubebuilder:default:=default
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_()\-\.]{1,89}[a-zA-Z0-9_()\-]$`
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ResourceGroupName is immutable"
+ // +immutable
+ // +required
+ ResourceGroupName string `json:"resourceGroup"`
+
+ // VnetID is the ID of an existing VNET to use in creating VMs. The VNET can exist in a different resource group
+ // other than the one specified in ResourceGroupName, but it must exist under the same subscription as
+ // SubscriptionID.
+ //
+ // In ARO HCP, this will be the ID of the customer provided VNET.
+ // + // Example: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/ + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="VnetID is immutable" + // +immutable + // +required + VnetID string `json:"vnetID,omitempty"` + + // subnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. This can be a + // different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must + // exist in the same network, HostedCluster.Spec.Platform.Azure.VnetID, and must exist under the same subscription ID, + // HostedCluster.Spec.Platform.Azure.SubscriptionID. + // subnetID is immutable once set. + // The subnetID should be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`. + // The subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12. + // The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and paranthesis and must not end with a period (.) character. + // The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods and must not end with either a period (.) or hyphen (-) character. + // The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character and must not end with a period (.) or hyphen (-) character. + // + // +kubebuilder:validation:XValidation:rule="size(self.split('/')) == 11 && self.matches('^/subscriptions/.*/resourceGroups/.*/providers/Microsoft.Network/virtualNetworks/.*/subnets/.*$')",message="encryptionSetID must be in the format `/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}`" + // +kubeubilder:validation:XValidation:rule="self.split('/')[2].matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the subscriptionId in the encryptionSetID must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[4].matches('[a-zA-Z0-9-_\\(\\)\\.]{1,90}')`,message="The resourceGroupName should be between 1 and 90 characters, consisting only of alphanumeric characters, hyphens, underscores, periods and paranthesis" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[4].endsWith('.')",message="the resourceGroupName in the subnetID must not end with a period (.) character" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[8].matches('[a-zA-Z0-9-_\\.]{2,64}')`,message="The vnetName should be between 2 and 64 characters, consisting only of alphanumeric characters, hyphens, underscores and periods" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[8].endsWith('.') && !self.split('/')[8].endsWith('-')",message="the vnetName in the subnetID must not end with either a period (.) 
or hyphen (-) character" + // +kubebuilder:validation:XValidation:rule=`self.split('/')[10].matches('[a-zA-Z0-9][a-zA-Z0-9-_\\.]{0,79}')`,message="The subnetName should be between 1 and 80 characters, consisting only of alphanumeric characters, hyphens and underscores and must start with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="!self.split('/')[10].endsWith('.') && !self.split('/')[10].endsWith('-')",message="the subnetName in the subnetID must not end with a period (.) or hyphen (-) character" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=355 + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" + // +kubebuilder:validation:Required + SubnetID string `json:"subnetID"` + + // SubscriptionID is a unique identifier for an Azure subscription used to manage resources. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubscriptionID is immutable" + // +immutable + // +required + SubscriptionID string `json:"subscriptionID"` + + // SecurityGroupID is the ID of an existing security group on the SubnetID. This field is provided as part of the + // configuration for the Azure cloud provider, aka Azure cloud controller manager (CCM). This security group is + // expected to exist under the same subscription as SubscriptionID. + // + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SecurityGroupID is immutable" + // +kubebuilder:validation:Required + // +immutable + // +required + SecurityGroupID string `json:"securityGroupID"` + + // managedIdentities contains the managed identities needed for HCP control plane and data plane components that + // authenticate with Azure's API. + // + // +kubebuilder:validation:Required + // +openshift:enable:FeatureGate=AROHCPManagedIdentities + ManagedIdentities AzureResourceManagedIdentities `json:"managedIdentities,omitempty"` +} + +// ManagedAzureKeyVault is an Azure Key Vault on the management cluster. +type ManagedAzureKeyVault struct { + // name is the name of the Azure Key Vault on the management cluster. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // tenantID is the tenant ID of the Azure Key Vault on the management cluster. + // + // +kubebuilder:validation:Required + TenantID string `json:"tenantID"` +} + +// AzureResourceManagedIdentities contains the managed identities needed for HCP control plane and data plane components +// that authenticate with Azure's API. +type AzureResourceManagedIdentities struct { + // controlPlane contains the client IDs of all the managed identities on the HCP control plane needing to + // authenticate with Azure's API. + // + // +kubebuilder:validation:Required + ControlPlane ControlPlaneManagedIdentities `json:"controlPlane"` + + // Future placeholder - DataPlaneMIs * DataPlaneManagedIdentities +} + +// ManagedIdentity contains the client ID, and its certificate name, of a managed identity. This managed identity is +// used, by an HCP component, to authenticate with the Azure API. +type ManagedIdentity struct { + // clientID is the client ID of a managed identity. + // + // +kubebuilder:validation:XValidation:rule="self.matches('^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$')",message="the client ID of a managed identity must be a valid UUID. It should be 5 groups of hyphen separated hexadecimal characters in the form 8-4-4-4-12." 
+ // +kubebuilder:validation:Required + ClientID string `json:"clientID"` + + // certificateName is the name of the certificate backing the managed identity. This certificate is expected to + // reside in an Azure Key Vault on the management cluster. + // + // +kubebuilder:validation:Required + CertificateName string `json:"certificateName"` +} + +// ControlPlaneManagedIdentities contains the managed identities on the HCP control plane needing to authenticate with +// Azure's API. +type ControlPlaneManagedIdentities struct { + // managedIdentitiesKeyVault contains information on the management cluster's managed identities Azure Key Vault. + // This Key Vault is where the managed identities certificates are stored. These certificates are pulled out of the + // Key Vault by the Secrets Store CSI driver and mounted into a volume on control plane pods requiring + // authentication with Azure API. + // + // More information on how the Secrets Store CSI driver works to do this can be found here: + // https://learn.microsoft.com/en-us/azure/aks/csi-secrets-store-driver. + // + // +kubebuilder:validation:Required + ManagedIdentitiesKeyVault ManagedAzureKeyVault `json:"managedIdentitiesKeyVault"` + + // cloudProvider is a pre-existing managed identity associated with the azure cloud provider, aka cloud controller + // manager. + // + // +kubebuilder:validation:Required + CloudProvider ManagedIdentity `json:"cloudProvider"` + + // nodePoolManagement is a pre-existing managed identity associated with the operator managing the NodePools. + // + // +kubebuilder:validation:Required + NodePoolManagement ManagedIdentity `json:"nodePoolManagement"` + + // controlPlaneOperator is a pre-existing managed identity associated with the control plane operator. + // + // +kubebuilder:validation:Required + ControlPlaneOperator ManagedIdentity `json:"controlPlaneOperator"` + + // imageRegistry is a pre-existing managed identity associated with the cluster-image-registry-operator. + // + // +kubebuilder:validation:Required + ImageRegistry ManagedIdentity `json:"imageRegistry"` + + // ingress is a pre-existing managed identity associated with the cluster-ingress-operator. + // + // +kubebuilder:validation:Required + Ingress ManagedIdentity `json:"ingress"` + + // network is a pre-existing managed identity associated with the cluster-network-operator. + // + // +kubebuilder:validation:Required + Network ManagedIdentity `json:"network"` + + // diskClientID is a pre-existing managed identity associated with the azure-disk-controller. + // + // +kubebuilder:validation:Required + Disk ManagedIdentity `json:"disk"` + + // fileClientID is a pre-existing managed identity associated with the azure-disk-controller. + // + // +kubebuilder:validation:Required + File ManagedIdentity `json:"file"` +} + +// AzureKMSSpec defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault +type AzureKMSSpec struct { + // ActiveKey defines the active key used to encrypt new secrets + // + // +kubebuilder:validation:Required + ActiveKey AzureKMSKey `json:"activeKey"` + // BackupKey defines the old key during the rotation process so previously created + // secrets can continue to be decrypted until they are all re-encrypted with the active key. + // +optional + BackupKey *AzureKMSKey `json:"backupKey,omitempty"` + + // kms is a pre-existing managed identity used to authenticate with Azure KMS. 
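+ // As an illustrative sketch only (the UUID and certificate name below are assumptions, not defaults), the kms identity would be referenced as:
+ //   kms:
+ //     clientID: "00000000-0000-0000-0000-000000000000"
+ //     certificateName: "my-kms-identity-cert"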
+ // + // +kubebuilder:validation:Required + // +openshift:enable:FeatureGate=AROHCPManagedIdentities + KMS ManagedIdentity `json:"kms"` +} + +type AzureKMSKey struct { + // KeyVaultName is the name of the keyvault. Must match criteria specified at https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name + // Your Microsoft Entra application used to create the cluster must be authorized to access this keyvault, e.g using the AzureCLI: + // `az keyvault set-policy -n $KEYVAULT_NAME --key-permissions decrypt encrypt --spn ` + KeyVaultName string `json:"keyVaultName"` + // KeyName is the name of the keyvault key used for encrypt/decrypt + KeyName string `json:"keyName"` + // KeyVersion contains the version of the key to use + KeyVersion string `json:"keyVersion"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go index 00d7ce48d..37afedc2d 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go @@ -24,8 +24,7 @@ type CertificateSigningRequestApprovalSpec struct{} // CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval type CertificateSigningRequestApprovalStatus struct{} -// +kubebuilder:object:root=true - +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CertificateSigningRequestApprovalList contains a list of CertificateSigningRequestApprovals. type CertificateSigningRequestApprovalList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go index 77f45afcb..4a765689f 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/clusterconfig.go @@ -13,3 +13,38 @@ func (c *ClusterConfiguration) GetNetwork() *configv1.NetworkSpec { retu func (c *ClusterConfiguration) GetOAuth() *configv1.OAuthSpec { return c.OAuth } func (c *ClusterConfiguration) GetScheduler() *configv1.SchedulerSpec { return c.Scheduler } func (c *ClusterConfiguration) GetProxy() *configv1.ProxySpec { return c.Proxy } + +func (c *ClusterConfiguration) GetTLSSecurityProfile() *configv1.TLSSecurityProfile { + if c != nil && c.APIServer != nil { + return c.APIServer.TLSSecurityProfile + } + return nil +} + +func (c *ClusterConfiguration) GetAutoAssignCIDRs() []string { + if c != nil && c.Network != nil && c.Network.ExternalIP != nil { + return c.Network.ExternalIP.AutoAssignCIDRs + } + return nil +} + +func (c *ClusterConfiguration) GetAuditPolicyConfig() configv1.Audit { + if c != nil && c.APIServer != nil && c.APIServer.Audit.Profile != "" { + return c.APIServer.Audit + } + return configv1.Audit{Profile: configv1.DefaultAuditProfileType} +} + +func (c *ClusterConfiguration) GetFeatureGateSelection() configv1.FeatureGateSelection { + if c != nil && c.FeatureGate != nil { + return c.FeatureGate.FeatureGateSelection + } + return configv1.FeatureGateSelection{FeatureSet: configv1.Default} +} + +func (c *ClusterConfiguration) GetNamedCertificates() []configv1.APIServerNamedServingCert { + if c != nil 
&& c.APIServer != nil {
+ return c.APIServer.ServingCerts.NamedCertificates
+ }
+ return []configv1.APIServerNamedServingCert{}
+}
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go
new file mode 100644
index 000000000..6ca9e97ac
--- /dev/null
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/controlplanecomponent_types.go
@@ -0,0 +1,96 @@
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+ SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ControlPlaneComponent{},
+ &ControlPlaneComponentList{},
+ )
+ return nil
+ })
+}
+
+const (
+ // ControlPlaneComponentAvailable indicates whether the ControlPlaneComponent is available.
+ ControlPlaneComponentAvailable ConditionType = "Available"
+ // ControlPlaneComponentProgressing indicates whether the ControlPlaneComponent is progressing.
+ ControlPlaneComponentProgressing ConditionType = "Progressing"
+
+ // WaitingForDependenciesReason indicates that there are unavailable dependencies blocking the ControlPlaneComponent reconciliation.
+ WaitingForDependenciesReason string = "WaitingForDependencies"
+ // ReconciliationErrorReason indicates that there was an error during the reconciliation of the ControlPlaneComponent.
+ ReconciliationErrorReason string = "ReconciliationError"
+)
+
+// ControlPlaneComponentSpec defines the desired state of ControlPlaneComponent
+type ControlPlaneComponentSpec struct {
+}
+
+// ComponentResource defines a resource reconciled by a ControlPlaneComponent.
+type ComponentResource struct {
+ // kind is the name of the resource schema.
+ // +required
+ Kind string `json:"kind"`
+
+ // group is the API group for this resource type.
+ // +required
+ Group string `json:"group"`
+
+ // name is the name of this resource.
+ // +required
+ Name string `json:"name"`
+}
+
+// ControlPlaneComponentStatus defines the observed state of ControlPlaneComponent
+type ControlPlaneComponentStatus struct {
+ // version reports the current version of this component.
+ // +optional
+ Version string `json:"version,omitempty"`
+
+ // resources is a list of the resources reconciled by this component.
+ // +optional
+ Resources []ComponentResource `json:"resources,omitempty"`
+
+ // Conditions contains details for the current state of the ControlPlane Component.
+ // If there is an error, then the Available condition will be false.
+ // + // Current condition types are: "Available" + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=controlplanecomponents,shortName=cpc;cpcs,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].status",description="Available" +// +kubebuilder:printcolumn:name="Progressing",type="string",JSONPath=".status.conditions[?(@.type==\"Progressing\")].status",description="Progressing" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Available\")].message",description="Message" +// +kubebuilder:printcolumn:name="ProgressingMessage",type="string",priority=1,JSONPath=".status.conditions[?(@.type==\"Progressing\")].message",description="ProgressingMessage" +// ControlPlaneComponent specifies the state of a ControlPlane Component +// +openshift:enable:FeatureGate=ControlPlaneV2 +type ControlPlaneComponent struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ControlPlaneComponentSpec `json:"spec,omitempty"` + Status ControlPlaneComponentStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// ControlPlaneComponentList contains a list of ControlPlaneComponent +type ControlPlaneComponentList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ControlPlaneComponent `json:"items"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go index 7eeaf3eb7..c0850c171 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/endpointservice_types.go @@ -62,6 +62,9 @@ type AWSEndpointServiceStatus struct { // +optional DNSZoneID string `json:"dnsZoneID,omitempty"` + // SecurityGroupID is the ID for the VPC endpoint SecurityGroup + SecurityGroupID string `json:"securityGroupID,omitempty"` + // Conditions contains details for the current state of the Endpoint Service // request If there is an error processing the request e.g. 
the NLB doesn't // exist, then the Available condition will be false, reason AWSErrorReason, @@ -89,8 +92,8 @@ type AWSEndpointService struct { Status AWSEndpointServiceStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true // AWSEndpointServiceList contains a list of AWSEndpointService +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSEndpointServiceList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go index 4fc32d35c..3241a27e4 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/groupversion_info.go @@ -4,7 +4,6 @@ package v1beta1 import ( - "github.com/openshift/hypershift/api/hypershift" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -12,7 +11,7 @@ import ( var ( // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: hypershift.GroupName, Version: "v1beta1"} + GroupVersion = schema.GroupVersion{Group: "hypershift.openshift.io", Version: "v1beta1"} SchemeGroupVersion = GroupVersion diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go index 31da048ac..a9aa8e81a 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hosted_controlplane.go @@ -117,6 +117,8 @@ type HostedControlPlaneSpec struct { // Services defines metadata about how control plane services are published // in the management cluster. + // +kubebuilder:validation:MaxItems=6 + // +kubebuilder:validation:MinItems=4 Services []ServicePublishingStrategyMapping `json:"services"` // AuditWebhook contains metadata for configuring an audit webhook @@ -182,9 +184,22 @@ type HostedControlPlaneSpec struct { // // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // labels when specified, define what custom labels are added to the hcp pods. + // Changing this day 2 will cause a rollout of all hcp pods. + // Duplicate keys are not supported. If duplicate keys are defined, only the last key/value pair is preserved. + // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(key) <= 317 && key.matches('^(([A-Za-z0-9]+(\\.[A-Za-z0-9]+)?)*[A-Za-z0-9]\\/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$'))`, message="label key must have two segments: an optional prefix and name, separated by a slash (/). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. 
If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(self[key]) <= 63 && self[key].matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$'))`, message="label value must be 63 characters or less (can be empty), consist of alphanumeric characters, dashes (-), underscores (_) or dots (.), and begin and end with an alphanumeric character" + // TODO: key/value validations break cost budget for <=4.17. We should figure why and enable it back. + // +kubebuilder:validation:MaxProperties=20 + // +optional + Labels map[string]string `json:"labels,omitempty"` } -// AvailabilityPolicy specifies a high level availability policy for components. +// availabilityPolicy specifies a high level availability policy for components. +// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica type AvailabilityPolicy string const ( @@ -313,8 +328,8 @@ type APIEndpoint struct { Port int32 `json:"port"` } -// +kubebuilder:object:root=true // HostedControlPlaneList contains a list of HostedControlPlanes. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type HostedControlPlaneList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go index 5c810022a..c6b876ab3 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_conditions.go @@ -67,6 +67,8 @@ const ( ClusterVersionAvailable ConditionType = "ClusterVersionAvailable" // ClusterVersionReleaseAccepted bubbles up Failing ReleaseAccepted from the CVO. ClusterVersionReleaseAccepted ConditionType = "ClusterVersionReleaseAccepted" + // ClusterVersionRetrievedUpdates bubbles up RetrievedUpdates from the CVO. + ClusterVersionRetrievedUpdates ConditionType = "ClusterVersionRetrievedUpdates" // UnmanagedEtcdAvailable indicates whether a user-managed etcd cluster is // healthy. @@ -99,6 +101,11 @@ const ( // A failure here may require external user intervention to resolve. E.g. oidc was deleted out of band. ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration" + // ValidIDPConfiguration indicates if the Identity Provider configuration is valid. + // A failure here may require external user intervention to resolve + // e.g. the user-provided IDP configuration provided is invalid or the IDP is not reachable. + ValidIDPConfiguration ConditionType = "ValidIDPConfiguration" + // ValidReleaseImage indicates if the release image set in the spec is valid // for the HostedCluster. 
For example, this can be set false if the // HostedCluster itself attempts an unsupported version before 4.9 or an @@ -111,6 +118,10 @@ const ( // performance degradation due to fragmentation of the double encapsulation in ovn-kubernetes ValidKubeVirtInfraNetworkMTU ConditionType = "ValidKubeVirtInfraNetworkMTU" + // KubeVirtNodesLiveMigratable indicates if all nodes (VirtualMachines) of the kubevirt + // hosted cluster can be live migrated without experiencing a node restart + KubeVirtNodesLiveMigratable ConditionType = "KubeVirtNodesLiveMigratable" + // ValidAWSIdentityProvider indicates if the Identity Provider referenced // in the cloud credentials is healthy. E.g. for AWS the idp ARN is referenced in the iam roles. // "Version": "2012-10-17", @@ -165,6 +176,10 @@ const ( // A failure here often means a software bug or a non-stable cluster. ReconciliationSucceeded ConditionType = "ReconciliationSucceeded" + // EtcdRecoveryActive indicates that the Etcd cluster is failing and the + // recovery job was triggered. + EtcdRecoveryActive ConditionType = "EtcdRecoveryActive" + // ClusterSizeComputed indicates that a t-shirt size was computed for this HostedCluster. // The last transition time for this condition is used to manage how quickly transitions occur. ClusterSizeComputed = "ClusterSizeComputed" @@ -192,6 +207,7 @@ const ( EtcdQuorumAvailableReason = "QuorumAvailable" EtcdWaitingForQuorumReason = "EtcdWaitingForQuorum" EtcdStatefulSetNotFoundReason = "StatefulSetNotFound" + EtcdRecoveryJobFailedReason = "EtcdRecoveryJobFailed" UnmanagedEtcdMisconfiguredReason = "UnmanagedEtcdMisconfigured" UnmanagedEtcdAsExpected = "UnmanagedEtcdAsExpected" @@ -206,6 +222,7 @@ const ( PlatformCredentialsNotFoundReason = "PlatformCredentialsNotFound" InvalidImageReason = "InvalidImage" InvalidIdentityProvider = "InvalidIdentityProvider" + PayloadArchNotFoundReason = "PayloadArchNotFound" InvalidIAMRoleReason = "InvalidIAMRole" @@ -222,6 +239,8 @@ const ( ReconciliationInvalidPausedUntilConditionReason = "InvalidPausedUntilValue" KubeVirtSuboptimalMTUReason = "KubeVirtSuboptimalMTUDetected" + + KubeVirtNodesLiveMigratableReason = "KubeVirtNodesNotLiveMigratable" ) // Messages. diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go index df34b9dec..ea436b86c 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/hostedcluster_types.go @@ -4,14 +4,12 @@ import ( "fmt" "strings" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/hypershift/api/util/ipnet" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - - configv1 "github.com/openshift/api/config/v1" - - "github.com/openshift/hypershift/api/util/ipnet" ) func init() { @@ -21,6 +19,7 @@ func init() { &HostedClusterList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil }) } @@ -134,8 +133,8 @@ const ( // AWSCredentialsFileSecretKey defines the Kubernetes secret key name that contains // the customer AWS credentials in the unmanaged authentication strategy for AWS KMS secret encryption AWSCredentialsFileSecretKey = "credentials" - // ControlPlaneComponent identifies a resource as belonging to a hosted control plane. 
- ControlPlaneComponent = "hypershift.openshift.io/control-plane-component"
+ // ControlPlaneComponentLabel identifies a resource as belonging to a hosted control plane.
+ ControlPlaneComponentLabel = "hypershift.openshift.io/control-plane-component"

 // OperatorComponent identifies a component as belonging to the operator.
 OperatorComponent = "hypershift.openshift.io/operator-component"
@@ -267,6 +266,14 @@ const (
 // the memory footprint of the kube-apiserver during upgrades.
 KubeAPIServerGOMemoryLimitAnnotation = "hypershift.openshift.io/kube-apiserver-gomemlimit"

+ // KubeAPIServerMaximumRequestsInFlight allows overriding the default value for the kube-apiserver max-requests-inflight
+ // flag. This allows controlling how many concurrent requests can be handled by the Kube API server at any given time.
+ KubeAPIServerMaximumRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-requests-inflight"
+
+ // KubeAPIServerMaximumMutatingRequestsInFlight allows overriding the default value for the kube-apiserver max-mutating-requests-inflight
+ // flag. This allows controlling how many mutating concurrent requests can be handled by the Kube API server at any given time.
+ KubeAPIServerMaximumMutatingRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-mutating-requests-inflight"
+
 // AWSLoadBalancerSubnetsAnnotation allows specifying the subnets to use for control plane load balancers
 // in the AWS platform.
 AWSLoadBalancerSubnetsAnnotation = "hypershift.openshift.io/aws-load-balancer-subnets"
@@ -293,6 +300,11 @@ const (
 // one on the NodePool takes precedence. The value is a go duration string with a number and a unit (ie. 8m, 1h, etc)
 MachineHealthCheckTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-timeout"

+ // MachineHealthCheckNodeStartupTimeoutAnnotation allows overriding the default machine health check timeout for
+ // node startup on nodepools. The annotation can be set in either the HostedCluster or the NodePool. If set on both, the
+ // one on the NodePool takes precedence. The value is a go duration string with a number and a unit (ie. 8m, 1h, etc)
+ MachineHealthCheckNodeStartupTimeoutAnnotation = "hypershift.openshift.io/machine-health-check-node-startup-timeout"
+
 // MachineHealthCheckMaxUnhealthyAnnotation allows overriding the max unhealthy value of the machine
 // health check created for a NodePool. The annotation can be set in either the HostedCluster or the NodePool.
 // If set on both, the one on the NodePool takes precedence. The value can be a number or a percentage value.
@@ -318,147 +330,202 @@ const (
 // This annotation signals to the NodePool controller that it is safe to use TopologySpreadConstraints on a NodePool
 // without triggering an unexpected update of KubeVirt VMs.
 NodePoolSupportsKubevirtTopologySpreadConstraintsAnnotation = "hypershift.openshift.io/nodepool-supports-kubevirt-topology-spread-constraints"
+
+ // IsKubeVirtRHCOSVolumeLabelName labels rhcos DataVolumes and PVCs, to be able to filter them, e.g. for backup
+ IsKubeVirtRHCOSVolumeLabelName = "hypershift.openshift.io/is-kubevirt-rhcos"
+
+ // SkipControlPlaneNamespaceDeletionAnnotation tells the hosted cluster controller not to delete the hosted control plane
+ // namespace during hosted cluster deletion when this annotation is set to the value "true".
+ SkipControlPlaneNamespaceDeletionAnnotation = "hypershift.openshift.io/skip-delete-hosted-controlplane-namespace"
+
+ // DisableIgnitionServerAnnotation controls skipping of the ignition server deployment.
+ DisableIgnitionServerAnnotation = "hypershift.openshift.io/disable-ignition-server"
+
+ // ControlPlaneOperatorV2Annotation tells the hosted cluster to set 'CPO_V2' env variable on the CPO deployment which enables
+ // the new manifest based CPO implementation.
+ ControlPlaneOperatorV2Annotation = "hypershift.openshift.io/cpo-v2"
+
+ // ControlPlaneOperatorV2EnvVar when set on the CPO deployment, enables the new manifest based CPO implementation.
+ ControlPlaneOperatorV2EnvVar = "CPO_V2"
 )

 // HostedClusterSpec is the desired behavior of a HostedCluster.
+
+// +kubebuilder:validation:XValidation:rule=`self.platform.type != "IBMCloud" ? self.services == oldSelf.services : true`, message="Services is immutable. Changes might result in unpredictable and disruptive behavior."
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "APIServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires APIServer Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "OAuthServer" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires OAuthServer Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "Konnectivity" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Konnectivity Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`self.platform.type == "Azure" ? self.services.exists(s, s.service == "Ignition" && s.servicePublishingStrategy.type == "Route" && s.servicePublishingStrategy.route.hostname != "") : true`,message="Azure platform requires Ignition Route service with a hostname to be defined"
+// +kubebuilder:validation:XValidation:rule=`has(self.issuerURL) || !has(self.serviceAccountSigningKey)`,message="If serviceAccountSigningKey is set, issuerURL must be set"
+
 type HostedClusterSpec struct {
- // Release specifies the desired OCP release payload for the hosted cluster.
- //
- // Updating this field will trigger a rollout of the control plane. The
- // behavior of the rollout will be driven by the ControllerAvailabilityPolicy
- // and InfrastructureAvailabilityPolicy.
+ // release specifies the desired OCP release payload for all the hosted cluster components.
+ // This includes those components running management side like the Kube API Server and the CVO but also the operands which land in the hosted cluster data plane like the ingress controller, ovn agents, etc.
+ // The maximum and minimum supported release versions are determined by the running HyperShift Operator.
+ // Attempting to use an unsupported version will result in the HostedCluster being degraded and the validateReleaseImage condition being false.
+ // Attempting to use a release with a skew against a NodePool release bigger than N-2 for the y-stream will result in leaving the NodePool in an unsupported state.
+ // Changing this field will trigger a rollout of the control plane components.
+ // The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies.
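+ // As an illustrative sketch only (the pullspec and field layout below are assumptions, not defaults), a release is typically referenced by an OCP release image, e.g.:
+ //   release:
+ //     image: quay.io/openshift-release-dev/ocp-release:4.17.3-x86_64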
+ //
+ // +required
 Release Release `json:"release"`

- // ControlPlaneRelease specifies the desired OCP release payload for
- // control plane components running on the management cluster.
- // Updating this field will trigger a rollout of the control plane. The
- // behavior of the rollout will be driven by the ControllerAvailabilityPolicy
- // and InfrastructureAvailabilityPolicy.
- // If not defined, Release is used
+ // controlPlaneRelease is like spec.release but only for the components running on the management cluster.
+ // This excludes any operand which will land in the hosted cluster data plane.
+ // It is useful when you need to apply a patch management side, such as for a CVE, transparently to the hosted cluster.
+ // Version input for this field is free form; no validation is performed against spec.release, and no maximum or minimum version check is enforced.
+ // If defined, it will dictate the version of the components running management side, while spec.release will dictate the version of the components landing in the hosted cluster data plane.
+ // If not defined, spec.release is used for both.
+ // Changing this field will trigger a rollout of the control plane.
+ // The behavior of the rollout will be driven by the ControllerAvailabilityPolicy and InfrastructureAvailabilityPolicy for PDBs and maxUnavailable and surge policies.
 // +optional
 ControlPlaneRelease *Release `json:"controlPlaneRelease,omitempty"`

- // ClusterID uniquely identifies this cluster. This is expected to be
- // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
- // hexadecimal values).
- // As with a Kubernetes metadata.uid, this ID uniquely identifies this
- // cluster in space and time.
- // This value identifies the cluster in metrics pushed to telemetry and
- // metrics produced by the control plane operators. If a value is not
- // specified, an ID is generated. After initial creation, the value is
- // immutable.
- // +kubebuilder:validation:Pattern:="[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
+ // clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits).
+ // As with a Kubernetes metadata.uid, this ID uniquely identifies this cluster in space and time.
+ // This value identifies the cluster in metrics pushed to telemetry and metrics produced by the control plane operators.
+ // If a value is not specified, a random clusterID will be generated and set by the controller.
+ // Once set, this value is immutable.
+ // +kubebuilder:validation:XValidation:rule="self.matches('[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}')",message="clusterID must be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal digits)"
+ // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="clusterID is immutable"
+ // +kubebuilder:validation:MaxLength=36
+ // +kubebuilder:validation:MinLength=36
 // +optional
 ClusterID string `json:"clusterID,omitempty"`

+ // infraID is a globally unique identifier for the cluster.
+ // It must consist of lowercase alphanumeric characters and hyphens ('-') only, and start and end with an alphanumeric character.
+ // It must be no more than 253 characters in length.
+ // This identifier will be used to associate various cloud resources with the HostedCluster and its associated NodePools.
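+ // As an illustrative sketch only (the value is an assumption, not a default), an infraID of "my-cluster-7b4xk" would result in
+ // cloud resources tagged with "kubernetes.io/cluster/my-cluster-7b4xk".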
+ // infraID is used to compute and tag created resources with "kubernetes.io/cluster/"+hcluster.Spec.InfraID which has contractual meaning for the cloud provider implementations. + // If a value is not specified, a random infraID will be generated and set by the controller. + // Once set, this value is immutable. + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="infraID must consist of lowercase alphanumeric characters or '-', start and end with an alphanumeric character, and be between 1 and 253 characters" + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="infraID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +optional + InfraID string `json:"infraID,omitempty"` + // updateService may be used to specify the preferred upstream update service. - // By default it will use the appropriate update service for the cluster and region. - // + // If omitted, we will use the appropriate update service for the cluster and region. + // This is used by the control plane operator to determine and signal the appropriate available upgrades in the hostedCluster.status. + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="updateService must be a valid absolute URL" // +optional UpdateService configv1.URL `json:"updateService,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be - // contain stable updates that are appropriate for production clusters. - // + // channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. + // If omitted, no particular upgrades are suggested. + // TODO(alberto): Consider the backend to use the default channel by default. Default channel will contain stable updates that are appropriate for production clusters. + // +kubebuilder:validation:MaxLength=100 + // +kubebuilder:validation:MinLength=1 // +optional Channel string `json:"channel,omitempty"` - // InfraID is a globally unique identifier for the cluster. This identifier - // will be used to associate various cloud resources with the HostedCluster - // and its associated NodePools. - // - // +optional - // +immutable - InfraID string `json:"infraID,omitempty"` - - // Platform specifies the underlying infrastructure provider for the cluster + // platform specifies the underlying infrastructure provider for the cluster // and is used to configure platform specific behavior. - // - // +immutable + // +required Platform PlatformSpec `json:"platform"` - // ControllerAvailabilityPolicy specifies the availability policy applied to - // critical control plane components. The default value is HighlyAvailable. - // + // controllerAvailabilityPolicy specifies the availability policy applied to critical control plane components like the Kube API Server. + // Possible values are HighlyAvailable and SingleReplica. The default value is HighlyAvailable. // +optional // +kubebuilder:default:="HighlyAvailable" - // +immutable ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - // InfrastructureAvailabilityPolicy specifies the availability policy applied - // to infrastructure services which run on cluster nodes. The default value is - // SingleReplica.
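As an editorial aside for orientation, here is a minimal Go sketch of how the release, controlPlaneRelease, and identifier fields above fit together. The hyperv1 import path, the release pullspecs, and the identifier values are illustrative assumptions rather than anything this commit defines:

package example

import (
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" // assumed import path for these types
)

// partialSpec sketches the release-related fields. When ControlPlaneRelease is
// nil, spec.release drives both the management-side components and the
// data-plane operands; when set, it overrides only the management side.
var partialSpec = hyperv1.HostedClusterSpec{
	Release: hyperv1.Release{
		Image: "quay.io/openshift-release-dev/ocp-release:4.17.9-x86_64", // illustrative pullspec
	},
	ControlPlaneRelease: &hyperv1.Release{
		Image: "quay.io/openshift-release-dev/ocp-release:4.17.10-x86_64", // hypothetical CVE respin, management side only
	},
	ClusterID: "2f5f4e2b-9c5e-4a7b-9d6e-1a2b3c4d5e6f", // RFC4122 UUID, immutable once set
	InfraID:   "demo-cluster-x7k2p",                   // used to tag cloud resources (kubernetes.io/cluster/<infraID>)
	Channel:   "stable-4.17",
}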
- // + // infrastructureAvailabilityPolicy specifies the availability policy applied to infrastructure services which run on the hosted cluster data plane like the ingress controller and image registry controller. + // Possible values are HighlyAvailable and SingleReplica. The default value is SingleReplica. // +optional // +kubebuilder:default:="SingleReplica" - // +immutable InfrastructureAvailabilityPolicy AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - // DNS specifies DNS configuration for the cluster. - // - // +immutable + // dns specifies the DNS configuration for the hosted cluster ingress. + // +optional DNS DNSSpec `json:"dns,omitempty"` - // Networking specifies network configuration for the cluster. - // - // +immutable + // networking specifies network configuration for the hosted cluster. + // Defaults to OVNKubernetes with a cluster network of cidr: "10.132.0.0/14" and a service network of cidr: "172.31.0.0/16". + // +required // +kubebuilder:default={networkType: "OVNKubernetes", clusterNetwork: {{cidr: "10.132.0.0/14"}}, serviceNetwork: {{cidr: "172.31.0.0/16"}}} Networking ClusterNetworking `json:"networking"` - // Autoscaling specifies auto-scaling behavior that applies to all NodePools - // associated with the control plane. + // autoscaling specifies auto-scaling behavior that applies to all NodePools + // associated with this HostedCluster. // // +optional Autoscaling ClusterAutoscaling `json:"autoscaling,omitempty"` - // Etcd specifies configuration for the control plane etcd cluster. The - // default ManagementType is Managed. Once set, the ManagementType cannot be + // etcd specifies configuration for the control plane etcd cluster. The + // default managementType is Managed. Once set, the managementType cannot be // changed. // - // +kubebuilder:validation:Optional // +kubebuilder:default={managementType: "Managed", managed: {storage: {type: "PersistentVolume", persistentVolume: {size: "8Gi"}}}} + // +required // +immutable Etcd EtcdSpec `json:"etcd"` - // Services specifies how individual control plane services are published from - // the hosting cluster of the control plane. - // - // If a given service is not present in this list, it will be exposed publicly - // by default. + // services specifies how individual control plane services endpoints are published for consumption. + // This requires APIServer;OAuthServer;Konnectivity;Ignition. + // This field is immutable for all platforms but IBMCloud. + // Max is 6 to account for OIDC;OVNSbDb for backward compatibility though they are a no-op.
+ // + // +kubebuilder:validation:MaxItems=6 + // +kubebuilder:validation:MinItems=4 + // +kubebuilder:validation:ListType=atomic + // -kubebuilder:validation:XValidation:rule="self.all(s, !(s.service == 'APIServer' && s.servicePublishingStrategy.type == 'Route') || has(s.servicePublishingStrategy.route.hostname))",message="If serviceType is 'APIServer' and publishing strategy is 'Route', then hostname must be set" + // -kubebuilder:validation:XValidation:rule="['APIServer', 'OAuthServer', 'Konnectivity', 'Ignition'].all(requiredType, self.exists(s, s.service == requiredType))",message="Services list must contain at least 'APIServer', 'OAuthServer', 'Konnectivity', and 'Ignition' service types" + // -kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'Route' && has(s.servicePublishingStrategy.route) && has(s.servicePublishingStrategy.route.hostname)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'Route' && (has(y.servicePublishingStrategy.route) && has(y.servicePublishingStrategy.route.hostname) && y.servicePublishingStrategy.route.hostname == x.servicePublishingStrategy.route.hostname)).size() <= 1)",message="Each route publishingStrategy 'hostname' must be unique within the Services list." + // -kubebuilder:validation:XValidation:rule="self.filter(s, s.servicePublishingStrategy.type == 'NodePort' && has(s.servicePublishingStrategy.nodePort) && has(s.servicePublishingStrategy.nodePort.address) && has(s.servicePublishingStrategy.nodePort.port)).all(x, self.filter(y, y.servicePublishingStrategy.type == 'NodePort' && (has(y.servicePublishingStrategy.nodePort) && has(y.servicePublishingStrategy.nodePort.address) && y.servicePublishingStrategy.nodePort.address == x.servicePublishingStrategy.nodePort.address && has(y.servicePublishingStrategy.nodePort.port) && y.servicePublishingStrategy.nodePort.port == x.servicePublishingStrategy.nodePort.port )).size() <= 1)",message="Each nodePort publishingStrategy 'nodePort' and 'hostname' must be unique within the Services list." + // TODO(alberto): this breaks the cost budget for < 4.17. We should figure why and enable it back. And if not fixable, consider imposing a minimum version on the management cluster. + // +required + // +immutable Services []ServicePublishingStrategyMapping `json:"services"` - // PullSecret references a pull secret to be injected into the container - // runtime of all cluster nodes. The secret must have a key named - // ".dockerconfigjson" whose value is the pull secret JSON. + // pullSecret is a local reference to a Secret that must have a ".dockerconfigjson" key whose content must be a valid OpenShift pull secret JSON. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // This pull secret will be part of every payload generated by the controllers for any NodePool of the HostedCluster + // and it will be injected into the container runtime of all NodePools. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // Changing the content of the secret in place will not trigger a rollout and might result in unpredictable behavior. + // +required + // +rollout + // TODO(alberto): have our own local reference type to include our opinions and avoid transparent changes. PullSecret corev1.LocalObjectReference `json:"pullSecret"` - // SSHKey references an SSH key to be injected into all cluster node sshd - // servers.
The secret must have a single key "id_rsa.pub" whose value is the - // public part of an SSH key. - // - // +immutable + // sshKey is a local reference to a Secret that must have a "id_rsa.pub" key whose content must be the public part of 1..N SSH keys. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // When sshKey is set, the controllers will generate a machineConfig with the sshAuthorizedKeys https://coreos.github.io/ignition/configuration-v3_2/ populated with this value. + // This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // +rollout + // +optional SSHKey corev1.LocalObjectReference `json:"sshKey"` - // IssuerURL is an OIDC issuer URL which is used as the issuer in all - // ServiceAccount tokens generated by the control plane API server. The - // default value is kubernetes.default.svc, which only works for in-cluster + // issuerURL is an OIDC issuer URL which will be used as the issuer in all + // ServiceAccount tokens generated by the control plane API server via --service-account-issuer kube api server flag. + // https://k8s-docs.netlify.app/en/docs/reference/command-line-tools-reference/kube-apiserver/ + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#serviceaccount-token-volume-projection + // The default value is kubernetes.default.svc, which only works for in-cluster // validation. - // + // If the platform is AWS and this value is set, the controller will update an s3 object with the appropriate OIDC documents (using the serviceAccountSigningKey info) into that issuerURL. + // The expectation is for this s3 url to be backed by an OIDC provider in the AWS IAM. // +kubebuilder:default:="https://kubernetes.default.svc" // +immutable // +optional - // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="issuerURL is immutable" + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="issuerURL must be a valid absolute URL" IssuerURL string `json:"issuerURL,omitempty"` - // ServiceAccountSigningKey is a reference to a secret containing the private key - // used by the service account token issuer. The secret is expected to contain - // a single key named "key". If not specified, a service account signing key will - // be generated automatically for the cluster. When specifying a service account - // signing key, a IssuerURL must also be specified. + // serviceAccountSigningKey is a local reference to a secret that must have a "key" key whose content must be the private key + // used by the service account token issuer. + // If not specified, a service account signing key will + // be generated automatically for the cluster. + // When specifying a service account signing key, an IssuerURL must also be specified. + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. // // +immutable - // +kubebuilder:validation:Optional // +optional ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"` @@ -466,7 +533,6 @@ type HostedClusterSpec struct { // cluster, represented as embedded resources that correspond to the openshift // configuration API. 
// - // +kubebuilder:validation:Optional // +optional Configuration *ClusterConfiguration `json:"configuration,omitempty"` @@ -481,37 +547,44 @@ type HostedClusterSpec struct { // +immutable AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"` - // ImageContentSources specifies image mirrors that can be used by cluster + // imageContentSources specifies image mirrors that can be used by cluster // nodes to pull content. - // + // When imageContentSources is set, the controllers will generate a machineConfig. + // This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. // +optional - // +immutable ImageContentSources []ImageContentSource `json:"imageContentSources,omitempty"` - // AdditionalTrustBundle is a reference to a ConfigMap containing a - // PEM-encoded X.509 certificate bundle that will be added to the hosted controlplane and nodes - // + // additionalTrustBundle is a local reference to a ConfigMap that must have a "ca-bundle.crt" key + // whose content must be a PEM-encoded X.509 certificate bundle that will be added to the hosted controlplane and nodes + // If the reference is set but none of the above requirements are met, the HostedCluster will enter a degraded state. + // TODO(alberto): Signal this in a condition. + // This will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. // +optional AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"` - // SecretEncryption specifies a Kubernetes secret encryption strategy for the + // secretEncryption specifies a Kubernetes secret encryption strategy for the // control plane. // // +optional SecretEncryption *SecretEncryptionSpec `json:"secretEncryption,omitempty"` - // FIPS indicates whether this cluster's nodes will be running in FIPS mode. + // fips indicates whether this cluster's nodes will be running in FIPS mode. // If set to true, the control plane's ignition server will be configured to // expect that nodes joining the cluster will be FIPS-enabled. - // + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="fips is immutable" // +optional // +immutable FIPS bool `json:"fips"` - // PausedUntil is a field that can be used to pause reconciliation on a resource. - // Either a date can be provided in RFC3339 format or a boolean. If a date is + // pausedUntil is a field that can be used to pause reconciliation on the HostedCluster controller, resulting in any change to the HostedCluster being ignored. + // Either a date can be provided in RFC3339 format or a boolean as in 'true', 'false', 'True', 'False'. If a date is // provided: reconciliation is paused on the resource until that date. If the boolean true is // provided: reconciliation is paused on the resource until the field is removed. 
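To illustrate the two accepted forms of pausedUntil, a small hedged sketch; ptr.To comes from k8s.io/utils/ptr and the date is arbitrary:

package example

import (
	"k8s.io/utils/ptr"
)

// PausedUntil is a *string on HostedClusterSpec: either an RFC3339 timestamp,
// which pauses reconciliation until that date is reached...
var pauseUntilDate = ptr.To("2025-06-30T00:00:00Z")

// ...or a boolean-like string, which pauses reconciliation until the field is removed.
var pauseIndefinitely = ptr.To("true")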
+ // +kubebuilder:validation:MaxLength=35 + // +kubebuilder:validation:MinLength=4 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*$') || self in ['true', 'false', 'True', 'False']`,message="PausedUntil must be a date in RFC3339 format or 'True', 'true', 'False' or 'false'" // +optional PausedUntil *string `json:"pausedUntil,omitempty"` @@ -526,8 +599,11 @@ type HostedClusterSpec struct { // +immutable OLMCatalogPlacement OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` - // NodeSelector when specified, must be true for the pods managed by the HostedCluster to be scheduled. - // + // NodeSelector when specified, is propagated to all control plane Deployments and Stateful sets running management side. + // It must be satisfied by the management Nodes for the pods to be scheduled. Otherwise the HostedCluster will enter a degraded state. + // Changes to this field will propagate to existing Deployments and StatefulSets. + // +kubebuilder:validation:XValidation:rule="size(self) <= 20",message="nodeSelector map can have at most 20 entries" + // TODO(alberto): add additional validation for the map key/values. // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` @@ -535,6 +611,19 @@ type HostedClusterSpec struct { // // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // labels when specified, define what custom labels are added to the hcp pods. + // Changing this day 2 will cause a rollout of all hcp pods. + // Duplicate keys are not supported. If duplicate keys are defined, only the last key/value pair is preserved. + // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + // + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(key) <= 317 && key.matches('^(([A-Za-z0-9]+(\\.[A-Za-z0-9]+)?)*[A-Za-z0-9]\\/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$'))`, message="label key must have two segments: an optional prefix and name, separated by a slash (/). The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), not longer than 253 characters in total, followed by a slash (/)" + // -kubebuilder:validation:XValidation:rule=`self.all(key, size(self[key]) <= 63 && self[key].matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$'))`, message="label value must be 63 characters or less (can be empty), consist of alphanumeric characters, dashes (-), underscores (_) or dots (.), and begin and end with an alphanumeric character" + // TODO: key/value validations break cost budget for <=4.17. We should figure why and enable it back. + // +kubebuilder:validation:MaxProperties=20 + // +optional + // +openshift:enable:FeatureGate=HCPPodsLabels + Labels map[string]string `json:"labels,omitempty"` } // OLMCatalogPlacement is an enum specifying the placement of OLM catalog components. @@ -589,34 +678,51 @@ type ImageContentSource struct { Mirrors []string `json:"mirrors,omitempty"` } -// ServicePublishingStrategyMapping specifies how individual control plane -// services are published from the hosting cluster of a control plane. +// ServicePublishingStrategyMapping specifies how individual control plane services endpoints are published for consumption. 
+// This includes APIServer;OAuthServer;Konnectivity;Ignition. +// If a given service is not present in this list, it will be exposed publicly by default. type ServicePublishingStrategyMapping struct { - // Service identifies the type of service being published. + // service identifies the type of service being published. + // It can be APIServer;OAuthServer;Konnectivity;Ignition. + // OVNSbDb;OIDC are no-op and kept for backward compatibility. + // This field is immutable. // // +kubebuilder:validation:Enum=APIServer;OAuthServer;OIDC;Konnectivity;Ignition;OVNSbDb // +immutable + // +required Service ServiceType `json:"service"` - // ServicePublishingStrategy specifies how to publish Service. + // servicePublishingStrategy specifies how to publish a service endpoint. + // +required ServicePublishingStrategy `json:"servicePublishingStrategy"` } // ServicePublishingStrategy specifies how to publish a ServiceType. +// +kubebuilder:validation:XValidation:rule="self.type == 'NodePort' ? has(self.nodePort) : !has(self.nodePort)",message="nodePort is required when type is NodePort, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'Route' ? !has(self.nodePort) && !has(self.loadBalancer) : !has(self.route)",message="only route is allowed when type is Route, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'LoadBalancer' ? !has(self.nodePort) && !has(self.route) : !has(self.loadBalancer)",message="only loadBalancer is allowed when type is LoadBalancer, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'None' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="None does not allow any configuration for loadBalancer, nodePort, or route" +// +kubebuilder:validation:XValidation:rule="self.type == 'S3' ? !has(self.nodePort) && !has(self.route) && !has(self.loadBalancer) : true",message="S3 does not allow any configuration for loadBalancer, nodePort, or route" + type ServicePublishingStrategy struct { - // Type is the publishing strategy used for the service. + // type is the publishing strategy used for the service. + // It can be LoadBalancer;NodePort;Route;None;S3. // // +kubebuilder:validation:Enum=LoadBalancer;NodePort;Route;None;S3 - // +immutable + // +required Type PublishingStrategyType `json:"type"` - // NodePort configures exposing a service using a NodePort. + // nodePort configures exposing a service using a NodePort. + // +optional NodePort *NodePortPublishingStrategy `json:"nodePort,omitempty"` - // LoadBalancer configures exposing a service using a LoadBalancer. + // loadBalancer configures exposing a service using a dedicated LoadBalancer. + // +optional LoadBalancer *LoadBalancerPublishingStrategy `json:"loadBalancer,omitempty"` - // Route configures exposing a service using a Route. + // route configures exposing a service using a Route through an ingress controller behind a cloud Load Balancer. + // The specifics of the setup are platform dependent. + // +optional Route *RoutePublishingStrategy `json:"route,omitempty"` } @@ -650,98 +756,151 @@ var ( // OAuthServer is the control plane OAuth service. OAuthServer ServiceType = "OAuthServer" - // OIDC is the control plane OIDC service. - OIDC ServiceType = "OIDC" - // Ignition is the control plane ignition service for nodes. Ignition ServiceType = "Ignition" // OVNSbDb is the optional control plane ovn southbound database service used by OVNKubernetes CNI.
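To make the publishing shapes above concrete, a hedged sketch of a Services list covering the four required endpoints; the hostnames and the hyperv1 import path are illustrative assumptions:

package example

import (
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" // assumed import path for these types
)

// services exposes the APIServer through a LoadBalancer and the remaining
// required endpoints as Routes with explicit, unique hostnames.
var services = []hyperv1.ServicePublishingStrategyMapping{
	{
		Service: hyperv1.APIServer,
		ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{
			Type:         hyperv1.LoadBalancer,
			LoadBalancer: &hyperv1.LoadBalancerPublishingStrategy{Hostname: "api.guest.example.com"},
		},
	},
	{
		Service: hyperv1.OAuthServer,
		ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{
			Type:  hyperv1.Route,
			Route: &hyperv1.RoutePublishingStrategy{Hostname: "oauth.guest.example.com"},
		},
	},
	{
		Service: hyperv1.Konnectivity,
		ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{
			Type:  hyperv1.Route,
			Route: &hyperv1.RoutePublishingStrategy{Hostname: "konnectivity.guest.example.com"},
		},
	},
	{
		Service: hyperv1.Ignition,
		ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{
			Type:  hyperv1.Route,
			Route: &hyperv1.RoutePublishingStrategy{Hostname: "ignition.guest.example.com"},
		},
	},
}

Consistent with the per-entry rules above, only the strategy matching Type is populated in each entry, and the Route hostnames are unique across the list.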
+ // Deprecated: This service is no longer used by OVNKubernetes CNI for >= 4.14. OVNSbDb ServiceType = "OVNSbDb" + + // OIDC is the control plane OIDC service. + // Deprecated: This service is no longer used by the control plane. + OIDC ServiceType = "OIDC" ) // NodePortPublishingStrategy specifies a NodePort used to expose a service. type NodePortPublishingStrategy struct { - // Address is the host/ip that the NodePort service is exposed over. + // address is the host/ip that the NodePort service is exposed over. + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule=`self.matches('^(([a-zA-Z0-9][-a-zA-Z0-9]*\\.)+[a-zA-Z]{2,}|localhost)$') || self.matches('^((\\d{1,3}\\.){3}\\d{1,3})$') || self.matches('^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$')`, message="address must be a valid hostname, IPv4, or IPv6 address" + // +required Address string `json:"address"` - // Port is the port of the NodePort service. If <=0, the port is dynamically + // port is the port of the NodePort service. If <=0, the port is dynamically // assigned when the service is created. Port int32 `json:"port,omitempty"` } // LoadBalancerPublishingStrategy specifies setting used to expose a service as a LoadBalancer. type LoadBalancerPublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the LoadBalancer. + // hostname is the name of the DNS record that will be created pointing to the LoadBalancer and passed through to consumers of the service. + // If omitted, the value will be inferred from the corev1.Service Load balancer type .status. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +optional Hostname string `json:"hostname,omitempty"` } // RoutePublishingStrategy specifies options for exposing a service as a Route. type RoutePublishingStrategy struct { - // Hostname is the name of the DNS record that will be created pointing to the Route. + // Hostname is the name of the DNS record that will be created pointing to the Route and passed through to consumers of the service. + // If omitted, the value will be inferred from management ingress.Spec.Domain. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(?:[a-zA-Z0-9-]+\\.)+[a-zA-Z]{2,}$')`,message="hostname must be a valid domain name (e.g., example.com)" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +optional Hostname string `json:"hostname,omitempty"` } -// DNSSpec specifies the DNS configuration in the cluster. +// DNSSpec specifies the DNS configuration for the hosted cluster ingress. type DNSSpec struct { - // BaseDomain is the base domain of the cluster. - // + // baseDomain is the base domain of the hosted cluster. + // It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain. + // If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain. + // Once set, this field is immutable. + // When the value is the empty string "", the controller might default to a value depending on the platform.
+ // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="baseDomain is immutable" + // +kubebuilder:validation:MaxLength=253 // +immutable + // +required BaseDomain string `json:"baseDomain"` - // BaseDomainPrefix is the base domain prefix of the cluster. - // defaults to clusterName if not set. Set it to "" if you don't want a prefix to be prepended to BaseDomain. - // + // baseDomainPrefix is the base domain prefix for the hosted cluster ingress. + // It will be used to configure ingress in the hosted cluster through the subdomain baseDomainPrefix.baseDomain. + // If baseDomainPrefix is omitted, the hostedCluster.name will be used as the subdomain. + // Set baseDomainPrefix to an empty string "", if you don't want a prefix at all (not even hostedCluster.name) to be prepended to baseDomain. + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPrefix is immutable" + // +kubebuilder:validation:MaxLength=253 // +optional - // +immutable BaseDomainPrefix *string `json:"baseDomainPrefix,omitempty"` - // PublicZoneID is the Hosted Zone ID where all the DNS records that are - // publicly accessible to the internet exist. - // + // publicZoneID is the Hosted Zone ID where all the DNS records that are publicly accessible to the internet exist. + // This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone. + // Once set, this value is immutable. // +optional + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="publicZoneID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +immutable PublicZoneID string `json:"publicZoneID,omitempty"` - // PrivateZoneID is the Hosted Zone ID where all the DNS records that are only - // available internally to the cluster exist. - // + // privateZoneID is the Hosted Zone ID where all the DNS records that are only available internally to the cluster exist. + // This field is optional and mainly leveraged in cloud environments where the DNS records for the .baseDomain are created by controllers in this zone. + // Once set, this value is immutable. // +optional + // +kubebuilder:validation:XValidation:rule=`oldSelf == "" || self == oldSelf`, message="privateZoneID is immutable" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +immutable PrivateZoneID string `json:"privateZoneID,omitempty"` } -// ClusterNetworking specifies network configuration for a cluster. +// ClusterNetworking specifies network configuration for a cluster. +// All CIDRs must be unique. Additional validation to check for CIDR overlaps and a consistent network stack is performed by the controllers. +// Failing that validation will result in the HostedCluster being degraded and the validConfiguration condition being false. +// TODO this is available in vanilla kube from 1.31 API servers and in OpenShift from 4.16. +// TODO(alberto): Use CEL cidr library for all these validations when all management clusters are >= 1.31.
+// +kubebuilder:validation:XValidation:rule="(!has(self.machineNetwork) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)) || (has(self.machineNetwork) && (self.machineNetwork.all(m, self.clusterNetwork.all(c, m.cidr != c.cidr)) && self.machineNetwork.all(m, self.serviceNetwork.all(s, m.cidr != s.cidr)) && self.clusterNetwork.all(c, self.serviceNetwork.all(s, c.cidr != s.cidr)))))",message="CIDR ranges in machineNetwork, clusterNetwork, and serviceNetwork must be unique and non-overlapping" type ClusterNetworking struct { - // MachineNetwork is the list of IP address pools for machines. - // + // machineNetwork is the list of IP address pools for machines. + // This might be used among other things to generate appropriate networking security groups in some clouds providers. + // Currently only one entry or two for dual stack is supported. + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="machineNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:ListType=atomic // +immutable // +optional MachineNetwork []MachineNetworkEntry `json:"machineNetwork,omitempty"` - // ClusterNetwork is the list of IP address pools for pods. - // + // clusterNetwork is the list of IP address pools for pods. + // Defaults to cidr: "10.132.0.0/14". + // Currently only one entry is supported. + // This field is immutable. // +immutable + // +optional // +kubebuilder:default:={{cidr: "10.132.0.0/14"}} - ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` - - // ServiceNetwork is the list of IP address pools for services. - // NOTE: currently only one entry is supported. - // + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // serviceNetwork is the list of IP address pools for services. + // Defaults to cidr: "172.31.0.0/16". + // Currently only one entry is supported. + // This field is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="serviceNetwork is immutable and cannot be modified once set." + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:MinItems=1 // +optional // +kubebuilder:default:={{cidr: "172.31.0.0/16"}} - ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork"` + ServiceNetwork []ServiceNetworkEntry `json:"serviceNetwork,omitempty"` - // NetworkType specifies the SDN provider used for cluster networking. - // + // networkType specifies the SDN provider used for cluster networking. + // Defaults to OVNKubernetes. + // This field is required and immutable. + // kubebuilder:validation:XValidation:rule="self == oldSelf", message="networkType is immutable" + // +optional // +kubebuilder:default:="OVNKubernetes" // +immutable - NetworkType NetworkType `json:"networkType"` + NetworkType NetworkType `json:"networkType,omitempty"` - // APIServer contains advanced network settings for the API server that affect - // how the APIServer is exposed inside a cluster node. + // apiServer contains advanced network settings for the API server that affect + // how the APIServer is exposed inside a hosted cluster node. 
// // +immutable APIServer *APIServerNetworking `json:"apiServer,omitempty"` @@ -756,11 +915,11 @@ type MachineNetworkEntry struct { // ClusterNetworkEntry is a single IP address block for pod IP blocks. IP blocks // are allocated with size 2^HostSubnetLength. type ClusterNetworkEntry struct { - // CIDR is the IP block address pool. + // cidr is the IP block address pool. CIDR ipnet.IPNet `json:"cidr"` - // HostPrefix is the prefix size to allocate to each node from the CIDR. - // For example, 24 would allocate 2^8=256 adresses to each node. If this + // hostPrefix is the prefix size to allocate to each node from the CIDR. + // For example, 24 would allocate 2^(32-24)=2^8=256 addresses to each node. If this // field is not used by the plugin, it can be left unset. // +optional HostPrefix int32 `json:"hostPrefix,omitempty"` @@ -768,7 +927,7 @@ type ClusterNetworkEntry struct { // ServiceNetworkEntry is a single IP address block for the service network. type ServiceNetworkEntry struct { - // CIDR is the IP block address pool for services within the cluster. + // cidr is the IP block address pool for services within the cluster in CIDR format (e.g., 192.168.1.0/24 or 2001:0db8::/64) CIDR ipnet.IPNet `json:"cidr"` } @@ -778,21 +937,27 @@ type CIDRBlock string // APIServerNetworking specifies how the APIServer is exposed inside a cluster // node. type APIServerNetworking struct { - // AdvertiseAddress is the address that nodes will use to talk to the API + // advertiseAddress is the address that pods within the nodes will use to talk to the API // server. This is an address associated with the loopback adapter of each // node. If not specified, the controller will take default values. // The default values will be set as 172.20.0.1 or fd00::1. + // This value is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="advertiseAddress is immutable" + // +optional AdvertiseAddress *string `json:"advertiseAddress,omitempty"` - // Port is the port at which the APIServer is exposed inside a node. Other + // port is the port at which the APIServer is exposed inside a node. Other // pods using host networking cannot listen on this port. - // If unset 6443 is used. + // If omitted, 6443 is used. // This is useful to choose a port other than the default one which might interfere with customer environments e.g. https://github.com/openshift/hypershift/pull/356. // Setting this to 443 is possible only for backward compatibility reasons and it's discouraged. // Doing so would result in the controller overriding the KAS endpoint in the guest cluster, creating a discrepancy with the KAS Pod and potentially causing temporary network failures. + // This value is immutable. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="port is immutable" + // +optional Port *int32 `json:"port,omitempty"` - // AllowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer + // allowedCIDRBlocks is an allow list of CIDR blocks that can access the APIServer. // If not specified, traffic is allowed from all addresses. // This depends on underlying support by the cloud provider for Service LoadBalancerSourceRanges AllowedCIDRBlocks []CIDRBlock `json:"allowedCIDRBlocks,omitempty"` @@ -818,8 +983,6 @@ const ( ) // PlatformType is a specific supported infrastructure provider.
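Bringing the DNS and networking types together, a hedged Go sketch; the hyperv1 and ipnet import paths, the ipnet.MustParseCIDR helper, the MachineNetworkEntry cidr field, and the concrete domains and CIDRs are all assumptions for illustration only:

package example

import (
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" // assumed import path
	"github.com/openshift/hypershift/api/util/ipnet"                 // assumed import path; MustParseCIDR assumed to return *ipnet.IPNet
	"k8s.io/utils/ptr"
)

// dnsAndNetworking sketches the dns and networking stanzas described above.
var dnsAndNetworking = hyperv1.HostedClusterSpec{
	DNS: hyperv1.DNSSpec{
		BaseDomain:       "hypershift.example.com",
		BaseDomainPrefix: ptr.To("guest"), // ingress is configured under guest.hypershift.example.com
	},
	Networking: hyperv1.ClusterNetworking{
		NetworkType:    hyperv1.OVNKubernetes,
		ClusterNetwork: []hyperv1.ClusterNetworkEntry{{CIDR: *ipnet.MustParseCIDR("10.132.0.0/14"), HostPrefix: 23}}, // 2^(32-23)=512 pod addresses per node
		ServiceNetwork: []hyperv1.ServiceNetworkEntry{{CIDR: *ipnet.MustParseCIDR("172.31.0.0/16")}},
		MachineNetwork: []hyperv1.MachineNetworkEntry{{CIDR: *ipnet.MustParseCIDR("10.0.0.0/16")}},
		APIServer: &hyperv1.APIServerNetworking{
			AdvertiseAddress: ptr.To("172.20.0.1"),
			Port:             ptr.To[int32](6443),
		},
	},
}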
-// -// +kubebuilder:validation:Enum=AWS;None;IBMCloud;Agent;KubeVirt;Azure;PowerVS;OpenStack type PlatformType string const ( @@ -868,7 +1031,10 @@ type PlatformSpec struct { // Type is the type of infrastructure provider for the cluster. // // +unionDiscriminator + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable" // +immutable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None + // +openshift:validation:FeatureGateAwareEnum:featureGate=OpenStack,enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None;OpenStack Type PlatformType `json:"type"` // AWS specifies configuration for clusters running on Amazon Web Services. @@ -903,1404 +1069,193 @@ type PlatformSpec struct { Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` // OpenStack specifies configuration for clusters running on OpenStack. - // // +optional - // +immutable + // +openshift:enable:FeatureGate=OpenStack OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` } -type KubevirtPlatformCredentials struct { - // InfraKubeConfigSecret is a reference to a secret that contains the kubeconfig for the external infra cluster - // that will be used to host the KubeVirt virtual machines for this cluster. - // - // +immutable - // +kubebuilder:validation:Required - // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraKubeConfigSecret is immutable" - InfraKubeConfigSecret *KubeconfigSecretRef `json:"infraKubeConfigSecret,omitempty"` +// IBMCloudPlatformSpec defines IBMCloud specific settings for components +type IBMCloudPlatformSpec struct { + // ProviderType is a specific supported infrastructure provider within IBM Cloud. + ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"` +} - // InfraNamespace defines the namespace on the external infra cluster that is used to host the KubeVirt - // virtual machines. This namespace must already exist before creating the HostedCluster and the kubeconfig - // referenced in the InfraKubeConfigSecret must have access to manage the required resources within this - // namespace. - // - // +immutable - // +kubebuilder:validation:Required +// Release represents the metadata for an OCP release payload image. +type Release struct { + // Image is the image pullspec of an OCP release payload image. + // See https://quay.io/repository/openshift-release-dev/ocp-release?tab=tags for a list of available images. + // +kubebuilder:validation:XValidation:rule=`self.matches('^(\\w+\\S+)$')`,message="Image must start with a word character (letters, digits, or underscores) and contain no white spaces" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 // +required - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraNamespace is immutable" - InfraNamespace string `json:"infraNamespace"` + Image string `json:"image"` } -// KubevirtPlatformSpec specifies configuration for kubevirt guest cluster installations -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.generateID) || has(self.generateID)", message="Kubevirt GenerateID is required once set" -type KubevirtPlatformSpec struct { - // BaseDomainPassthrough toggles whether or not an automatically - // generated base domain for the guest cluster should be used that - // is a subdomain of the management cluster's *.apps DNS. 
- // - // For the KubeVirt platform, the basedomain can be autogenerated using - // the *.apps domain of the management/infra hosting cluster - // This makes the guest cluster's base domain a subdomain of the - // hypershift infra/mgmt cluster's base domain. - // - // Example: - // Infra/Mgmt cluster's DNS - // Base: example.com - // Cluster: mgmt-cluster.example.com - // Apps: *.apps.mgmt-cluster.example.com - // KubeVirt Guest cluster's DNS - // Base: apps.mgmt-cluster.example.com - // Cluster: guest.apps.mgmt-cluster.example.com - // Apps: *.apps.guest.apps.mgmt-cluster.example.com - // - // This is possible using OCP wildcard routes +// ClusterAutoscaling specifies auto-scaling behavior that applies to all +// NodePools associated with a control plane. +type ClusterAutoscaling struct { + // maxNodesTotal is the maximum allowable number of nodes for the Autoscaler scale out to be operational. + // The autoscaler will not grow the cluster beyond this number. + // If omitted, the autoscaler will not have a maximum limit. + // number. // + // +kubebuilder:validation:Minimum=0 // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPassthrough is immutable" - BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"` + MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"` - // GenerateID is used to uniquely apply a name suffix to resources associated with - // kubevirt infrastructure resources - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Kubevirt GenerateID is immutable once set" - // +kubebuilder:validation:MaxLength=11 + // maxPodGracePeriod is the maximum seconds to wait for graceful pod + // termination before scaling down a NodePool. The default is 600 seconds. + // + // +kubebuilder:validation:Minimum=0 // +optional - GenerateID string `json:"generateID,omitempty"` - // Credentials defines the client credentials used when creating KubeVirt virtual machines. - // Defining credentials is only necessary when the KubeVirt virtual machines are being placed - // on a cluster separate from the one hosting the Hosted Control Plane components. + MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"` + + // maxNodeProvisionTime is the maximum time to wait for node provisioning + // before considering the provisioning to be unsuccessful, expressed as a Go + // duration string. The default is 15 minutes. // - // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on - // the same cluster and namespace as the Hosted Control Plane. + // +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ // +optional - Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` + MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"` - // StorageDriver defines how the KubeVirt CSI driver exposes StorageClasses on - // the infra cluster (hosting the VMs) to the guest cluster. + // podPriorityThreshold enables users to schedule "best-effort" pods, which + // shouldn't trigger autoscaler actions, but only run when there are spare + // resources available. The default is -10. 
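A small hedged sketch of the autoscaling knobs described here; the values are arbitrary, ptr.To comes from k8s.io/utils/ptr, and the hyperv1 import path is an assumption as elsewhere:

package example

import (
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" // assumed import path
	"k8s.io/utils/ptr"
)

// autoscaling caps the cluster at 50 nodes, waits up to 600 seconds (the
// documented default) for graceful pod termination on scale-down, gives node
// provisioning 20 minutes, and keeps the documented -10 priority threshold.
var autoscaling = hyperv1.ClusterAutoscaling{
	MaxNodesTotal:        ptr.To[int32](50),
	MaxPodGracePeriod:    ptr.To[int32](600),
	MaxNodeProvisionTime: "20m",
	PodPriorityThreshold: ptr.To[int32](-10),
}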
+ // + // See the following for more details: + // https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption // - // +kubebuilder:validation:Optional // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver is immutable" - StorageDriver *KubevirtStorageDriverSpec `json:"storageDriver,omitempty"` + PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"` } -// KubevirtStorageDriverConfigType defines how the kubevirt storage driver is configured. -// -// +kubebuilder:validation:Enum=None;Default;Manual -type KubevirtStorageDriverConfigType string +// EtcdManagementType is a enum specifying the strategy for managing the cluster's etcd instance +// +kubebuilder:validation:Enum=Managed;Unmanaged +type EtcdManagementType string const ( - // NoneKubevirtStorageDriverConfigType means no kubevirt storage driver is used - NoneKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "None" - - // DefaultKubevirtStorageDriverConfigType means the kubevirt storage driver maps to the - // underlying infra cluster's default storageclass - DefaultKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Default" + // Managed means HyperShift should provision and operator the etcd cluster + // automatically. + Managed EtcdManagementType = "Managed" - // ManualKubevirtStorageDriverConfigType means the kubevirt storage driver mapping is - // explicitly defined. - ManualKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Manual" + // Unmanaged means HyperShift will not provision or manage the etcd cluster, + // and the user is responsible for doing so. + Unmanaged EtcdManagementType = "Unmanaged" ) -type KubevirtStorageDriverSpec struct { - // Type represents the type of kubevirt csi driver configuration to use - // +// EtcdSpec specifies configuration for a control plane etcd cluster. +// +kubebuilder:validation:XValidation:rule="self.managementType == 'Managed' ? has(self.managed) : !has(self.managed)",message="Only managed configuration must be set when managementType is Managed" +// +kubebuilder:validation:XValidation:rule="self.managementType == 'Unmanaged' ? has(self.unmanaged) : !has(self.unmanaged)",message="Only unmanaged configuration must be set when managementType is Unmanaged" +type EtcdSpec struct { + // managementType defines how the etcd cluster is managed. + // This can be either Managed or Unmanaged. + // This field is immutable. // +unionDiscriminator + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="managementType is immutable" + // +required // +immutable - // +kubebuilder:default=Default - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Type is immutable" - Type KubevirtStorageDriverConfigType `json:"type,omitempty"` - - // Manual is used to explicilty define how the infra storageclasses are - // mapped to guest storageclasses - // - // +immutable - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Manual is immutable" - Manual *KubevirtManualStorageDriverConfig `json:"manual,omitempty"` -} + ManagementType EtcdManagementType `json:"managementType"` -type KubevirtManualStorageDriverConfig struct { - // StorageClassMapping maps StorageClasses on the infra cluster hosting - // the KubeVirt VMs to StorageClasses that are made available within the - // Guest Cluster. 
- // - // NOTE: It is possible that not all capablities of an infra cluster's - // storageclass will be present for the corresponding guest clusters storageclass. + // managed specifies the behavior of an etcd cluster managed by HyperShift. // // +optional // +immutable - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassMapping is immutable" - StorageClassMapping []KubevirtStorageClassMapping `json:"storageClassMapping,omitempty"` + Managed *ManagedEtcdSpec `json:"managed,omitempty"` + // unmanaged specifies configuration which enables the control plane to + // integrate with an externally managed etcd cluster. + // // +optional // +immutable - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassMapping is immutable" - VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMapping `json:"volumeSnapshotClassMapping,omitempty"` -} - -type KubevirtStorageClassMapping struct { - // Group contains which group this mapping belongs to. - Group string `json:"group,omitempty"` - // InfraStorageClassName is the name of the infra cluster storage class that - // will be exposed to the guest. - InfraStorageClassName string `json:"infraStorageClassName"` - // GuestStorageClassName is the name that the corresponding storageclass will - // be called within the guest cluster - GuestStorageClassName string `json:"guestStorageClassName"` + Unmanaged *UnmanagedEtcdSpec `json:"unmanaged,omitempty"` } -type KubevirtVolumeSnapshotClassMapping struct { - // Group contains which group this mapping belongs to. - Group string `json:"group,omitempty"` - // InfraStorageClassName is the name of the infra cluster volume snapshot class that - // will be exposed to the guest. - InfraVolumeSnapshotClassName string `json:"infraVolumeSnapshotClassName"` - // GuestVolumeSnapshotClassName is the name that the corresponding volumeSnapshotClass will - // be called within the guest cluster - GuestVolumeSnapshotClassName string `json:"guestVolumeSnapshotClassName"` +// ManagedEtcdSpec specifies the behavior of an etcd cluster managed by +// HyperShift. +type ManagedEtcdSpec struct { + // storage specifies how etcd data is persisted. + //+required + Storage ManagedEtcdStorageSpec `json:"storage"` } -// AgentPlatformSpec specifies configuration for agent-based installations. -type AgentPlatformSpec struct { - // AgentNamespace is the namespace where to search for Agents for this cluster - AgentNamespace string `json:"agentNamespace"` -} +// ManagedEtcdStorageType is a storage type for an etcd cluster. +// +// +kubebuilder:validation:Enum=PersistentVolume +type ManagedEtcdStorageType string -// IBMCloudPlatformSpec defines IBMCloud specific settings for components -type IBMCloudPlatformSpec struct { - // ProviderType is a specific supported infrastructure provider within IBM Cloud. - ProviderType configv1.IBMCloudProviderType `json:"providerType,omitempty"` -} +const ( + // PersistentVolumeEtcdStorage uses PersistentVolumes for etcd storage. + PersistentVolumeEtcdStorage ManagedEtcdStorageType = "PersistentVolume" +) -// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components -type PowerVSPlatformSpec struct { - // AccountID is the IBMCloud account id. - // This field is immutable. Once set, It can't be changed. 
- // - // +immutable - AccountID string `json:"accountID"` +var ( + DefaultPersistentVolumeEtcdStorageSize resource.Quantity = resource.MustParse("8Gi") +) - // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name - // This field is immutable. Once set, It can't be changed. - // - // +kubebuilder:validation:Pattern=`^crn:` +// ManagedEtcdStorageSpec describes the storage configuration for etcd data. +type ManagedEtcdStorageSpec struct { + // type is the kind of persistent storage implementation to use for etcd. + // Only PersistentVolume is supported at the moment. // +immutable - CISInstanceCRN string `json:"cisInstanceCRN"` + // +required + // +unionDiscriminator + Type ManagedEtcdStorageType `json:"type"` - // ResourceGroup is the IBMCloud Resource Group in which the cluster resides. - // This field is immutable. Once set, It can't be changed. + // persistentVolume is the configuration for PersistentVolume etcd storage. + // With this implementation, a PersistentVolume will be allocated for every + // etcd member (either 1 or 3 depending on the HostedCluster control plane + // availability configuration). // - // +immutable - ResourceGroup string `json:"resourceGroup"` + // +optional + PersistentVolume *PersistentVolumeEtcdStorageSpec `json:"persistentVolume,omitempty"` - // Region is the IBMCloud region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot image for a given release. - // This field is immutable. Once set, It can't be changed. + // restoreSnapshotURL allows an optional URL to be provided where + // an etcd snapshot can be downloaded, for example a pre-signed URL + // referencing a storage service. + // This snapshot will be restored on initial startup, only when the etcd PV + // is empty. // + // +optional // +immutable - Region string `json:"region"` + // +kubebuilder:validation:XValidation:rule="self.size() <= 1", message="RestoreSnapshotURL shouldn't contain more than 1 entry" + RestoreSnapshotURL []string `json:"restoreSnapshotURL,omitempty"` +} - // Zone is the availability zone where control plane cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // +// PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume +// etcd storage. +type PersistentVolumeEtcdStorageSpec struct { + // storageClassName is the StorageClass of the data volume for each etcd member. + // See https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassName is immutable" + // +optional // +immutable - Zone string `json:"zone"` + // TODO(alberto): This shouldn't really be a pointer. There's no real different semantic for nil and empty string. Revisit all pointer vs non-pointer choices. + StorageClassName *string `json:"storageClassName,omitempty"` - // Subnet is the subnet to use for control plane cloud resources. - // This field is immutable. Once set, It can't be changed. - // + // size is the minimum size of the data volume for each etcd member. + // Default is 8Gi. 
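As a hedged sketch of the Managed etcd shape built from these types; the storage class name and the 16Gi size are illustrative, resource is k8s.io/apimachinery/pkg/api/resource, and the hyperv1 import path is an assumption as elsewhere:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" // assumed import path
)

// etcdSpec asks HyperShift to run etcd itself, backed by one PersistentVolume
// per member on a hypothetical "gp3-csi" StorageClass with a 16Gi data volume.
var etcdSpec = hyperv1.EtcdSpec{
	ManagementType: hyperv1.Managed,
	Managed: &hyperv1.ManagedEtcdSpec{
		Storage: hyperv1.ManagedEtcdStorageSpec{
			Type: hyperv1.PersistentVolumeEtcdStorage,
			PersistentVolume: &hyperv1.PersistentVolumeEtcdStorageSpec{
				StorageClassName: ptr.To("gp3-csi"),
				Size:             ptr.To(resource.MustParse("16Gi")),
			},
		},
	},
}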
+ // This field is immutable + // +optional + // +kubebuilder:default="8Gi" // +immutable - Subnet *PowerVSResourceReference `json:"subnet"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Etcd PV storage size is immutable" + Size *resource.Quantity `json:"size,omitempty"` +} - // ServiceInstance is the reference to the Power VS service on which the server instance(VM) will be created. - // Power VS service is a container for all Power VS instances at a specific geographic region. - // serviceInstance can be created via IBM Cloud catalog or CLI. - // ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud UI or IBM Cloud cli. - // - // More detail about Power VS service instance. - // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server +// UnmanagedEtcdSpec specifies configuration which enables the control plane to +// integrate with an eternally managed etcd cluster. +type UnmanagedEtcdSpec struct { + // endpoint is the full etcd cluster client endpoint URL. For example: // - // This field is immutable. Once set, It can't be changed. + // https://etcd-client:2379 // - // +immutable - ServiceInstanceID string `json:"serviceInstanceID"` - - // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control - // plane. - // This field is immutable. Once set, It can't be changed. + // If the URL uses an HTTPS scheme, the TLS field is required. // - // +immutable - VPC *PowerVSVPC `json:"vpc"` + // +kubebuilder:validation:Pattern=`^https://` + Endpoint string `json:"endpoint"` - // KubeCloudControllerCreds is a reference to a secret containing cloud - // credentials with permissions matching the cloud controller policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "cloud controller policy" - // - // +immutable - KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"` - - // NodePoolManagementCreds is a reference to a secret containing cloud - // credentials with permissions matching the node pool management policy. - // This field is immutable. Once set, It can't be changed. - // - // TODO(dan): document the "node pool management policy" - // - // +immutable - NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"` - - // IngressOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for ingress operator to get authenticated with ibm cloud. - // - // +immutable - IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"` - - // StorageOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for storage operator to get authenticated with ibm cloud. - // - // +immutable - StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"` - - // ImageRegistryOperatorCloudCreds is a reference to a secret containing ibm cloud - // credentials for image registry operator to get authenticated with ibm cloud. - // - // +immutable - ImageRegistryOperatorCloudCreds corev1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds"` -} - -// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control -// plane. -type PowerVSVPC struct { - // Name for VPC to used for all the service load balancer. - // This field is immutable. Once set, It can't be changed. 
- // - // +immutable - Name string `json:"name"` - - // Region is the IBMCloud region in which VPC gets created, this VPC used for all the ingress traffic - // into the OCP cluster. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - Region string `json:"region"` - - // Zone is the availability zone where load balancer cloud resources are - // created. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - // +optional - Zone string `json:"zone,omitempty"` - - // Subnet is the subnet to use for load balancer. - // This field is immutable. Once set, It can't be changed. - // - // +immutable - // +optional - Subnet string `json:"subnet,omitempty"` -} - -// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name. -// Only one of ID, or Name may be specified. Specifying more than one will result in -// a validation error. -type PowerVSResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - - // Name of resource - // +optional - Name *string `json:"name,omitempty"` -} - -// AWSCloudProviderConfig specifies AWS networking configuration. -type AWSCloudProviderConfig struct { - // Subnet is the subnet to use for control plane cloud resources. - // - // +optional - Subnet *AWSResourceReference `json:"subnet,omitempty"` - - // Zone is the availability zone where control plane cloud resources are - // created. - // - // +optional - Zone string `json:"zone,omitempty"` - - // VPC is the VPC to use for control plane cloud resources. - VPC string `json:"vpc"` -} - -// AWSEndpointAccessType specifies the publishing scope of cluster endpoints. -type AWSEndpointAccessType string - -const ( - // Public endpoint access allows public API server access and public node - // communication with the control plane. - Public AWSEndpointAccessType = "Public" - - // PublicAndPrivate endpoint access allows public API server access and - // private node communication with the control plane. - PublicAndPrivate AWSEndpointAccessType = "PublicAndPrivate" - - // Private endpoint access allows only private API server access and private - // node communication with the control plane. - Private AWSEndpointAccessType = "Private" -) - -// AWSPlatformSpec specifies configuration for clusters running on Amazon Web Services. -type AWSPlatformSpec struct { - // Region is the AWS region in which the cluster resides. This configures the - // OCP control plane cloud integrations, and is used by NodePool to resolve - // the correct boot AMI for a given release. - // - // +immutable - Region string `json:"region"` - - // CloudProviderConfig specifies AWS networking configuration for the control - // plane. - // This is mainly used for cloud provider controller config: - // https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/legacy-cloud-providers/aws/aws.go#L1347-L1364 - // TODO(dan): should this be named AWSNetworkConfig? - // - // +optional - // +immutable - CloudProviderConfig *AWSCloudProviderConfig `json:"cloudProviderConfig,omitempty"` - - // ServiceEndpoints specifies optional custom endpoints which will override - // the default service endpoint of specific AWS Services. - // - // There must be only one ServiceEndpoint for a given service name. 
- // - // +optional - // +immutable - ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` - - // RolesRef contains references to various AWS IAM roles required to enable - // integrations such as OIDC. - // - // +immutable - RolesRef AWSRolesRef `json:"rolesRef"` - - // ResourceTags is a list of additional tags to apply to AWS resources created - // for the cluster. See - // https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for - // information on tagging AWS resources. AWS supports a maximum of 50 tags per - // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available - // for the user. - // - // +kubebuilder:validation:MaxItems=25 - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` - - // EndpointAccess specifies the publishing scope of cluster endpoints. The - // default is Public. - // - // +kubebuilder:validation:Enum=Public;PublicAndPrivate;Private - // +kubebuilder:default=Public - // +optional - EndpointAccess AWSEndpointAccessType `json:"endpointAccess,omitempty"` - - // AdditionalAllowedPrincipals specifies a list of additional allowed principal ARNs - // to be added to the hosted control plane's VPC Endpoint Service to enable additional - // VPC Endpoint connection requests to be automatically accepted. - // See https://docs.aws.amazon.com/vpc/latest/privatelink/configure-endpoint-service.html - // for more details around VPC Endpoint Service allowed principals. - // - // +optional - AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` - - // MultiArch specifies whether the Hosted Cluster will be expected to support NodePools with different - // CPU architectures, i.e., supporting arm64 NodePools and supporting amd64 NodePools on the same Hosted Cluster. - // +kubebuilder:default=false - // +optional - MultiArch bool `json:"multiArch"` -} - -type AWSRoleCredentials struct { - ARN string `json:"arn"` - Namespace string `json:"namespace"` - Name string `json:"name"` -} - -// AWSResourceTag is a tag to apply to AWS resources created for the cluster. -type AWSResourceTag struct { - // Key is the key of the tag. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=128 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - Key string `json:"key"` - // Value is the value of the tag. - // - // Some AWS service do not support empty values. Since tags are added to - // resources in many services, the length of the tag value must meet the - // requirements of all services. - // - // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` - Value string `json:"value"` -} - -// AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. -type AWSRolesRef struct { - // The referenced role must have a trust relationship that allows it to be assumed via web identity. - // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. - // Example: - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Principal": { - // "Federated": "{{ .ProviderARN }}" - // }, - // "Action": "sts:AssumeRoleWithWebIdentity", - // "Condition": { - // "StringEquals": { - // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} - // } - // } - // } - // ] - // } - // - // IngressARN is an ARN value referencing a role appropriate for the Ingress Operator. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "elasticloadbalancing:DescribeLoadBalancers", - // "tag:GetResources", - // "route53:ListHostedZones" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets" - // ], - // "Resource": [ - // "arn:aws:route53:::PUBLIC_ZONE_ID", - // "arn:aws:route53:::PRIVATE_ZONE_ID" - // ] - // } - // ] - // } - IngressARN string `json:"ingressARN"` - - // ImageRegistryARN is an ARN value referencing a role appropriate for the Image Registry Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "s3:CreateBucket", - // "s3:DeleteBucket", - // "s3:PutBucketTagging", - // "s3:GetBucketTagging", - // "s3:PutBucketPublicAccessBlock", - // "s3:GetBucketPublicAccessBlock", - // "s3:PutEncryptionConfiguration", - // "s3:GetEncryptionConfiguration", - // "s3:PutLifecycleConfiguration", - // "s3:GetLifecycleConfiguration", - // "s3:GetBucketLocation", - // "s3:ListBucket", - // "s3:GetObject", - // "s3:PutObject", - // "s3:DeleteObject", - // "s3:ListBucketMultipartUploads", - // "s3:AbortMultipartUpload", - // "s3:ListMultipartUploadParts" - // ], - // "Resource": "*" - // } - // ] - // } - ImageRegistryARN string `json:"imageRegistryARN"` - - // StorageARN is an ARN value referencing a role appropriate for the Storage Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:AttachVolume", - // "ec2:CreateSnapshot", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:DeleteSnapshot", - // "ec2:DeleteTags", - // "ec2:DeleteVolume", - // "ec2:DescribeInstances", - // "ec2:DescribeSnapshots", - // "ec2:DescribeTags", - // "ec2:DescribeVolumes", - // "ec2:DescribeVolumesModifications", - // "ec2:DetachVolume", - // "ec2:ModifyVolume" - // ], - // "Resource": "*" - // } - // ] - // } - StorageARN string `json:"storageARN"` - - // NetworkARN is an ARN value referencing a role appropriate for the Network Operator. - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:DescribeInstances", - // "ec2:DescribeInstanceStatus", - // "ec2:DescribeInstanceTypes", - // "ec2:UnassignPrivateIpAddresses", - // "ec2:AssignPrivateIpAddresses", - // "ec2:UnassignIpv6Addresses", - // "ec2:AssignIpv6Addresses", - // "ec2:DescribeSubnets", - // "ec2:DescribeNetworkInterfaces" - // ], - // "Resource": "*" - // } - // ] - // } - NetworkARN string `json:"networkARN"` - - // KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. 
- // Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies - // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "autoscaling:DescribeAutoScalingGroups", - // "autoscaling:DescribeLaunchConfigurations", - // "autoscaling:DescribeTags", - // "ec2:DescribeAvailabilityZones", - // "ec2:DescribeInstances", - // "ec2:DescribeImages", - // "ec2:DescribeRegions", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVolumes", - // "ec2:CreateSecurityGroup", - // "ec2:CreateTags", - // "ec2:CreateVolume", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyVolume", - // "ec2:AttachVolume", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateRoute", - // "ec2:DeleteRoute", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteVolume", - // "ec2:DetachVolume", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:DescribeVpcs", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:AttachLoadBalancerToSubnets", - // "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancer", - // "elasticloadbalancing:CreateLoadBalancerPolicy", - // "elasticloadbalancing:CreateLoadBalancerListeners", - // "elasticloadbalancing:ConfigureHealthCheck", - // "elasticloadbalancing:DeleteLoadBalancer", - // "elasticloadbalancing:DeleteLoadBalancerListeners", - // "elasticloadbalancing:DescribeLoadBalancers", - // "elasticloadbalancing:DescribeLoadBalancerAttributes", - // "elasticloadbalancing:DetachLoadBalancerFromSubnets", - // "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - // "elasticloadbalancing:ModifyLoadBalancerAttributes", - // "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - // "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - // "elasticloadbalancing:AddTags", - // "elasticloadbalancing:CreateListener", - // "elasticloadbalancing:CreateTargetGroup", - // "elasticloadbalancing:DeleteListener", - // "elasticloadbalancing:DeleteTargetGroup", - // "elasticloadbalancing:DeregisterTargets", - // "elasticloadbalancing:DescribeListeners", - // "elasticloadbalancing:DescribeLoadBalancerPolicies", - // "elasticloadbalancing:DescribeTargetGroups", - // "elasticloadbalancing:DescribeTargetHealth", - // "elasticloadbalancing:ModifyListener", - // "elasticloadbalancing:ModifyTargetGroup", - // "elasticloadbalancing:RegisterTargets", - // "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - // "iam:CreateServiceLinkedRole", - // "kms:DescribeKey" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // } - // ] - // } - // +immutable - KubeCloudControllerARN string `json:"kubeCloudControllerARN"` - - // NodePoolManagementARN is an ARN value referencing a role appropriate for the CAPI Controller. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Action": [ - // "ec2:AssociateRouteTable", - // "ec2:AttachInternetGateway", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:CreateInternetGateway", - // "ec2:CreateNatGateway", - // "ec2:CreateRoute", - // "ec2:CreateRouteTable", - // "ec2:CreateSecurityGroup", - // "ec2:CreateSubnet", - // "ec2:CreateTags", - // "ec2:DeleteInternetGateway", - // "ec2:DeleteNatGateway", - // "ec2:DeleteRouteTable", - // "ec2:DeleteSecurityGroup", - // "ec2:DeleteSubnet", - // "ec2:DeleteTags", - // "ec2:DescribeAccountAttributes", - // "ec2:DescribeAddresses", - // "ec2:DescribeAvailabilityZones", - // "ec2:DescribeImages", - // "ec2:DescribeInstances", - // "ec2:DescribeInternetGateways", - // "ec2:DescribeNatGateways", - // "ec2:DescribeNetworkInterfaces", - // "ec2:DescribeNetworkInterfaceAttribute", - // "ec2:DescribeRouteTables", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeSubnets", - // "ec2:DescribeVpcs", - // "ec2:DescribeVpcAttribute", - // "ec2:DescribeVolumes", - // "ec2:DetachInternetGateway", - // "ec2:DisassociateRouteTable", - // "ec2:DisassociateAddress", - // "ec2:ModifyInstanceAttribute", - // "ec2:ModifyNetworkInterfaceAttribute", - // "ec2:ModifySubnetAttribute", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:RunInstances", - // "ec2:TerminateInstances", - // "tag:GetResources", - // "ec2:CreateLaunchTemplate", - // "ec2:CreateLaunchTemplateVersion", - // "ec2:DescribeLaunchTemplates", - // "ec2:DescribeLaunchTemplateVersions", - // "ec2:DeleteLaunchTemplate", - // "ec2:DeleteLaunchTemplateVersions" - // ], - // "Resource": [ - // "*" - // ], - // "Effect": "Allow" - // }, - // { - // "Condition": { - // "StringLike": { - // "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" - // } - // }, - // "Action": [ - // "iam:CreateServiceLinkedRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" - // ], - // "Effect": "Allow" - // }, - // { - // "Action": [ - // "iam:PassRole" - // ], - // "Resource": [ - // "arn:*:iam::*:role/*-worker-role" - // ], - // "Effect": "Allow" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:Decrypt", - // "kms:ReEncrypt", - // "kms:GenerateDataKeyWithoutPlainText", - // "kms:DescribeKey" - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "kms:CreateGrant" - // ], - // "Resource": "*", - // "Condition": { - // "Bool": { - // "kms:GrantIsForAWSResource": true - // } - // } - // } - // ] - // } - // - // +immutable - NodePoolManagementARN string `json:"nodePoolManagementARN"` - - // ControlPlaneOperatorARN is an ARN value referencing a role appropriate for the Control Plane Operator. 
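As an illustration of how these fields fit together, here is a minimal Go sketch (not part of this patch) that populates an AWSPlatformSpec with an AWSRolesRef. The hyperv1 import alias, the account ID, and the role names are placeholders; each ARN is expected to reference a role with the web-identity trust policy and per-operator permissions described in the surrounding comments.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func main() {
	// Placeholder ARNs: each role must be assumable via the cluster's OIDC
	// provider and carry the policy document for its operator.
	aws := hyperv1.AWSPlatformSpec{
		Region:         "us-east-1",
		EndpointAccess: hyperv1.Public,
		RolesRef: hyperv1.AWSRolesRef{
			IngressARN:              "arn:aws:iam::123456789012:role/example-ingress",
			ImageRegistryARN:        "arn:aws:iam::123456789012:role/example-image-registry",
			StorageARN:              "arn:aws:iam::123456789012:role/example-csi",
			NetworkARN:              "arn:aws:iam::123456789012:role/example-network",
			KubeCloudControllerARN:  "arn:aws:iam::123456789012:role/example-ccm",
			NodePoolManagementARN:   "arn:aws:iam::123456789012:role/example-capi",
			ControlPlaneOperatorARN: "arn:aws:iam::123456789012:role/example-cpo",
		},
	}
	fmt.Println(aws.Region, aws.EndpointAccess)
}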
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "ec2:CreateVpcEndpoint", - // "ec2:DescribeVpcEndpoints", - // "ec2:ModifyVpcEndpoint", - // "ec2:DeleteVpcEndpoints", - // "ec2:CreateTags", - // "route53:ListHostedZones", - // "ec2:CreateSecurityGroup", - // "ec2:AuthorizeSecurityGroupIngress", - // "ec2:AuthorizeSecurityGroupEgress", - // "ec2:DeleteSecurityGroup", - // "ec2:RevokeSecurityGroupIngress", - // "ec2:RevokeSecurityGroupEgress", - // "ec2:DescribeSecurityGroups", - // "ec2:DescribeVpcs", - // ], - // "Resource": "*" - // }, - // { - // "Effect": "Allow", - // "Action": [ - // "route53:ChangeResourceRecordSets", - // "route53:ListResourceRecordSets" - // ], - // "Resource": "arn:aws:route53:::%s" - // } - // ] - // } - // +immutable - ControlPlaneOperatorARN string `json:"controlPlaneOperatorARN"` -} - -// AWSServiceEndpoint stores the configuration for services to -// override existing defaults of AWS Services. -type AWSServiceEndpoint struct { - // Name is the name of the AWS service. - // This must be provided and cannot be empty. - Name string `json:"name"` - - // URL is fully qualified URI with scheme https, that overrides the default generated - // endpoint for a client. - // This must be provided and cannot be empty. - // - // +kubebuilder:validation:Pattern=`^https://` - URL string `json:"url"` -} - -// AzurePlatformSpec specifies configuration for clusters running on Azure. Generally, the HyperShift API assumes bring -// your own (BYO) cloud infrastructure resources. For example, resources like a resource group, a subnet, or a vnet -// would be pre-created and then their names would be used respectively in the ResourceGroupName, SubnetName, VnetName -// fields of the Hosted Cluster CR. An existing cloud resource is expected to exist under the same SubscriptionID. -type AzurePlatformSpec struct { - // Credentials is the object containing existing Azure credentials needed for creating and managing cloud - // infrastructure resources. - // - // +kubebuilder:validation:Required - // +required - Credentials corev1.LocalObjectReference `json:"credentials"` - - // Cloud is the cloud environment identifier, valid values could be found here: https://github.com/Azure/go-autorest/blob/4c0e21ca2bbb3251fe7853e6f9df6397f53dd419/autorest/azure/environments.go#L33 - // - // +kubebuilder:validation:Enum=AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud - // +kubebuilder:default="AzurePublicCloud" - Cloud string `json:"cloud,omitempty"` - - // Location is the Azure region in where all the cloud infrastructure resources will be created. - // - // Example: eastus - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Location is immutable" - // +immutable - // +required - Location string `json:"location"` - - // ResourceGroupName is the name of an existing resource group where all cloud resources created by the Hosted - // Cluster are to be placed. The resource group is expected to exist under the same subscription as SubscriptionID. - // - // In ARO HCP, this will be the managed resource group where customer cloud resources will be created. - // - // Resource group naming requirements can be found here: https://azure.github.io/PSRule.Rules.Azure/en/rules/Azure.ResourceGroup.Name/. 
- // - //Example: if your resource group ID is /subscriptions//resourceGroups/, your - // ResourceGroupName is . - // - // +kubebuilder:default:=default - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_()\-\.]{1,89}[a-zA-Z0-9_()\-]$` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ResourceGroupName is immutable" - // +immutable - // +required - ResourceGroupName string `json:"resourceGroup"` - - // VnetID is the ID of an existing VNET to use in creating VMs. The VNET can exist in a different resource group - // other than the one specified in ResourceGroupName, but it must exist under the same subscription as - // SubscriptionID. - // - // In ARO HCP, this will be the ID of the customer provided VNET. - // - // Example: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/ - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="VnetID is immutable" - // +immutable - // +required - VnetID string `json:"vnetID,omitempty"` - - // SubnetID is the subnet ID of an existing subnet where the load balancer for node egress will be created. This - // subnet is expected to be a subnet within the VNET specified in VnetID. This subnet is expected to exist under the - // same subscription as SubscriptionID. - // - // In ARO HCP, managed services will create the aforementioned load balancer in ResourceGroupName. - // - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" - // +kubebuilder:validation:Required - // +immutable - // +required - SubnetID string `json:"subnetID"` - - // SubscriptionID is a unique identifier for an Azure subscription used to manage resources. - // - // +kubebuilder:validation:Required - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SubscriptionID is immutable" - // +immutable - // +required - SubscriptionID string `json:"subscriptionID"` - - // SecurityGroupID is the ID of an existing security group on the SubnetID. This field is provided as part of the - // configuration for the Azure cloud provider, aka Azure cloud controller manager (CCM). This security group is - // expected to exist under the same subscription as SubscriptionID. - // - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="SecurityGroupID is immutable" - // +kubebuilder:validation:Required - // +immutable - // +required - SecurityGroupID string `json:"securityGroupID,omitempty"` -} - -// OpenStackPlatformSpec specifies configuration for clusters running on OpenStack. -type OpenStackPlatformSpec struct { - // IdentityRef is a reference to a secret holding OpenStack credentials - // to be used when reconciling the hosted cluster. - // - // +kubebuilder:validation:Required - // +required - IdentityRef OpenStackIdentityReference `json:"identityRef"` - - // ManagedSubnets describe the OpenStack Subnet to be created. Cluster actuator will create a network, - // and a subnet with the defined DNSNameservers, AllocationPools and the CIDR defined in the HostedCluster - // MachineNetwork, and a router connected to the subnet. Currently only one IPv4 - // subnet is supported. - // - // +kubebuilder:validation:MaxItems=1 - // +listType=atomic - // +optional - ManagedSubnets []SubnetSpec `json:"managedSubnets,omitempty"` - - // Router specifies an existing router to be used if ManagedSubnets are - // specified. If specified, no new router will be created. 
- // - // +optional - Router *RouterParam `json:"router,omitempty"` - - // Network specifies an existing network to use if no ManagedSubnets - // are specified. - // +optional - Network *NetworkParam `json:"network,omitempty"` - - // Subnets specifies existing subnets to use if not ManagedSubnets are - // specified. All subnets must be in the network specified by Network. - // There can be zero, one, or two subnets. If no subnets are specified, - // all subnets in Network will be used. If 2 subnets are specified, one - // must be IPv4 and the other IPv6. - // - // +kubebuilder:validation:MaxItems=2 - // +listType=atomic - // +optional - Subnets []SubnetParam `json:"subnets,omitempty"` - - // NetworkMTU sets the maximum transmission unit (MTU) value to address fragmentation for the private network ID. - // This value will be used only if the Cluster actuator creates the network. - // If left empty, the network will have the default MTU defined in Openstack network service. - // To use this field, the Openstack installation requires the net-mtu neutron API extension. - // - // +optional - NetworkMTU *int `json:"networkMTU,omitempty"` - - // ExternalNetwork is the OpenStack Network to be used to get public internet to the VMs. - // This option is ignored if DisableExternalNetwork is set to true. - // - // If ExternalNetwork is defined it must refer to exactly one external network. - // - // If ExternalNetwork is not defined or is empty the controller will use any - // existing external network as long as there is only one. It is an - // error if ExternalNetwork is not defined and there are multiple - // external networks unless DisableExternalNetwork is also set. - // - // If ExternalNetwork is not defined and there are no external networks - // the controller will proceed as though DisableExternalNetwork was set. - // - // +optional - ExternalNetwork *NetworkParam `json:"externalNetwork,omitempty"` - - // DisableExternalNetwork specifies whether or not to attempt to connect the cluster - // to an external network. This allows for the creation of clusters when connecting - // to an external network is not possible or desirable, e.g. if using a provider network. - // - // +optional - DisableExternalNetwork *bool `json:"disableExternalNetwork,omitempty"` - - // Tags to set on all resources in cluster which support tags - // - // +listType=set - // +optional - Tags []string `json:"tags,omitempty"` -} - -// OpenStackIdentityReference is a reference to an infrastructure -// provider identity to be used to provision cluster resources. -type OpenStackIdentityReference struct { - // Name is the name of a secret in the same namespace as the resource being provisioned. - // The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file. - // The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate. - // - // +kubebuilder:validation:Required - // +required - Name string `json:"name"` - - // CloudName specifies the name of the entry in the clouds.yaml file to use. - // - // +kubebuilder:validation:Required - // +required - CloudName string `json:"cloudName"` -} - -type SubnetSpec struct { - // DNSNameservers holds a list of DNS server addresses that will be provided when creating - // the subnet. These addresses need to have the same IP version as CIDR. 
- // - // +optional - DNSNameservers []string `json:"dnsNameservers,omitempty"` - - // AllocationPools is an array of AllocationPool objects that will be applied to OpenStack Subnet being created. - // If set, OpenStack will only allocate these IPs for Machines. It will still be possible to create ports from - // outside of these ranges manually. - // - // +optional - AllocationPools []AllocationPool `json:"allocationPools,omitempty"` -} - -type AllocationPool struct { - // Start represents the start of the AllocationPool, that is the lowest IP of the pool. - // - // +kubebuilder:validation:Required - // +required - Start string `json:"start"` - - // End represents the end of the AlloctionPool, that is the highest IP of the pool. - // - // +kubebuilder:validation:Required - // +required - End string `json:"end"` -} - -// RouterParam specifies an OpenStack router to use. It may be specified by either ID or filter, but not both. -// +kubebuilder:validation:MaxProperties:=1 -// +kubebuilder:validation:MinProperties:=1 -type RouterParam struct { - // ID is the ID of the router to use. If ID is provided, the other filters cannot be provided. Must be in UUID format. - // - // +kubebuilder:validation:Format:=uuid - // +optional - ID *string `json:"id,omitempty"` - - // Filter specifies a filter to select an OpenStack router. If provided, cannot be empty. - // - // +optional - Filter *RouterFilter `json:"filter,omitempty"` -} - -// RouterFilter specifies a query to select an OpenStack router. At least one property must be set. -// +kubebuilder:validation:MinProperties:=1 -type RouterFilter struct { - // Name is the name of the router to filter by. - // - // +optional - Name string `json:"name,omitempty"` - - // Description is the description of the router to filter by. - // - // +optional - Description string `json:"description,omitempty"` - - // ProjectID is the project ID of the router to filter by. - // - // +optional - ProjectID string `json:"projectID,omitempty"` - - // FilterByNeutronTags specifies tags to filter by. - // - // +optional - FilterByNeutronTags `json:",inline"` -} - -// NetworkParam specifies an OpenStack network. It may be specified by either ID or Filter, but not both. -// +kubebuilder:validation:MaxProperties:=1 -// +kubebuilder:validation:MinProperties:=1 -type NetworkParam struct { - // ID is the ID of the network to use. If ID is provided, the other filters cannot be provided. Must be in UUID format. - // - // +kubebuilder:validation:Format:=uuid - // +optional - ID *string `json:"id,omitempty"` - - // Filter specifies a filter to select an OpenStack network. If provided, cannot be empty. - // - // +optional - Filter *NetworkFilter `json:"filter,omitempty"` -} - -// NetworkFilter specifies a query to select an OpenStack network. At least one property must be set. -// +kubebuilder:validation:MinProperties:=1 -type NetworkFilter struct { - // Name is the name of the network to filter by. - // - // +optional - Name string `json:"name,omitempty"` - - // Description is the description of the network to filter by. - // - // +optional - Description string `json:"description,omitempty"` - - // ProjectID is the project ID of the network to filter by. - // - // +optional - ProjectID string `json:"projectID,omitempty"` - - // FilterByNeutronTags specifies tags to filter by. - // - // +optional - FilterByNeutronTags `json:",inline"` -} - -// NeutronTag represents a tag on a Neutron resource. -// It may not be empty and may not contain commas. 
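For orientation, a small sketch (not part of this patch) of selecting an existing OpenStack network and subnet by filter rather than by UUID, using the types above. The secret name, cloud name, and the network/subnet names are placeholder values.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func main() {
	// A NetworkParam/SubnetParam carries either an ID or a Filter, never both.
	osp := hyperv1.OpenStackPlatformSpec{
		IdentityRef: hyperv1.OpenStackIdentityReference{
			Name:      "openstack-credentials", // secret containing clouds.yaml
			CloudName: "openstack",
		},
		Network: &hyperv1.NetworkParam{
			Filter: &hyperv1.NetworkFilter{Name: "example-network"},
		},
		Subnets: []hyperv1.SubnetParam{
			{Filter: &hyperv1.SubnetFilter{Name: "example-subnet", IPVersion: 4}},
		},
	}
	fmt.Println(osp.Network.Filter.Name, osp.Subnets[0].Filter.Name)
}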
-// +kubebuilder:validation:Pattern:="^[^,]+$" -// +kubebuilder:validation:MinLength:=1 -type NeutronTag string - -type FilterByNeutronTags struct { - // Tags is a list of tags to filter by. If specified, the resource must - // have all of the tags specified to be included in the result. - // - // +listType=set - // +optional - Tags []NeutronTag `json:"tags,omitempty"` - - // TagsAny is a list of tags to filter by. If specified, the resource - // must have at least one of the tags specified to be included in the - // result. - // - // +listType=set - // +optional - TagsAny []NeutronTag `json:"tagsAny,omitempty"` - - // NotTags is a list of tags to filter by. If specified, resources which - // contain all of the given tags will be excluded from the result. - // - // +listType=set - // +optional - NotTags []NeutronTag `json:"notTags,omitempty"` - - // NotTagsAny is a list of tags to filter by. If specified, resources - // which contain any of the given tags will be excluded from the result. - // - // +listType=set - // +optional - NotTagsAny []NeutronTag `json:"notTagsAny,omitempty"` -} - -// SubnetParam specifies an OpenStack subnet to use. It may be specified by either ID or filter, but not both. -// +kubebuilder:validation:MaxProperties:=1 -// +kubebuilder:validation:MinProperties:=1 -type SubnetParam struct { - // ID is the uuid of the subnet. It will not be validated. - // - // +kubebuilder:validation:Format:=uuid - // +optional - ID *string `json:"id,omitempty"` - - // Filter specifies a filter to select the subnet. It must match exactly one subnet. - // - // +optional - Filter *SubnetFilter `json:"filter,omitempty"` -} - -// SubnetFilter specifies a filter to select a subnet. At least one parameter must be specified. -// +kubebuilder:validation:MinProperties:=1 -type SubnetFilter struct { - // Name is the name of the subnet to filter by. - // - // +optional - Name string `json:"name,omitempty"` - // Description is the description of the subnet to filter by. - // - // +optional - Description string `json:"description,omitempty"` - - // ProjectID is the project ID of the subnet to filter by. - // - // +optional - ProjectID string `json:"projectID,omitempty"` - - // IPVersion is the IP version of the subnet to filter by. - // - // +optional - IPVersion int `json:"ipVersion,omitempty"` - - // GatewayIP is the gateway IP of the subnet to filter by. - // - // +optional - GatewayIP string `json:"gatewayIP,omitempty"` - - // CIDR is the CIDR of the subnet to filter by. - // - // +optional - CIDR string `json:"cidr,omitempty"` - - // IPv6AddressMode is the IPv6 address mode of the subnet to filter by. - // - // +optional - IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` - - // IPv6RAMode is the IPv6 RA mode of the subnet to filter by. - // - // +optional - IPv6RAMode string `json:"ipv6RAMode,omitempty"` - - // FilterByNeutronTags specifies tags to filter by. - // - // +optional - FilterByNeutronTags `json:",inline"` -} - -// Release represents the metadata for an OCP release payload image. -type Release struct { - // Image is the image pullspec of an OCP release payload image. - // - // +kubebuilder:validation:Pattern=^(\w+\S+)$ - Image string `json:"image"` -} - -// ClusterAutoscaling specifies auto-scaling behavior that applies to all -// NodePools associated with a control plane. -type ClusterAutoscaling struct { - // MaxNodesTotal is the maximum allowable number of nodes across all NodePools - // for a HostedCluster. 
The autoscaler will not grow the cluster beyond this - // number. - // - // +kubebuilder:validation:Minimum=0 - MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"` - - // MaxPodGracePeriod is the maximum seconds to wait for graceful pod - // termination before scaling down a NodePool. The default is 600 seconds. - // - // +kubebuilder:validation:Minimum=0 - MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"` - - // MaxNodeProvisionTime is the maximum time to wait for node provisioning - // before considering the provisioning to be unsuccessful, expressed as a Go - // duration string. The default is 15 minutes. - // - // +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ - MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"` - - // PodPriorityThreshold enables users to schedule "best-effort" pods, which - // shouldn't trigger autoscaler actions, but only run when there are spare - // resources available. The default is -10. - // - // See the following for more details: - // https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption - // - // +optional - PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"` -} - -// EtcdManagementType is a enum specifying the strategy for managing the cluster's etcd instance -// +kubebuilder:validation:Enum=Managed;Unmanaged -type EtcdManagementType string - -const ( - // Managed means HyperShift should provision and operator the etcd cluster - // automatically. - Managed EtcdManagementType = "Managed" - - // Unmanaged means HyperShift will not provision or manage the etcd cluster, - // and the user is responsible for doing so. - Unmanaged EtcdManagementType = "Unmanaged" -) - -// EtcdSpec specifies configuration for a control plane etcd cluster. -type EtcdSpec struct { - // ManagementType defines how the etcd cluster is managed. - // - // +unionDiscriminator - // +immutable - ManagementType EtcdManagementType `json:"managementType"` - - // Managed specifies the behavior of an etcd cluster managed by HyperShift. - // - // +optional - // +immutable - Managed *ManagedEtcdSpec `json:"managed,omitempty"` - - // Unmanaged specifies configuration which enables the control plane to - // integrate with an eternally managed etcd cluster. - // - // +optional - // +immutable - Unmanaged *UnmanagedEtcdSpec `json:"unmanaged,omitempty"` -} - -// ManagedEtcdSpec specifies the behavior of an etcd cluster managed by -// HyperShift. -type ManagedEtcdSpec struct { - // Storage specifies how etcd data is persisted. - Storage ManagedEtcdStorageSpec `json:"storage"` -} - -// ManagedEtcdStorageType is a storage type for an etcd cluster. -// -// +kubebuilder:validation:Enum=PersistentVolume -type ManagedEtcdStorageType string - -const ( - // PersistentVolumeEtcdStorage uses PersistentVolumes for etcd storage. - PersistentVolumeEtcdStorage ManagedEtcdStorageType = "PersistentVolume" -) - -var ( - DefaultPersistentVolumeEtcdStorageSize resource.Quantity = resource.MustParse("8Gi") -) - -// ManagedEtcdStorageSpec describes the storage configuration for etcd data. -type ManagedEtcdStorageSpec struct { - // Type is the kind of persistent storage implementation to use for etcd. - // - // +immutable - // +unionDiscriminator - Type ManagedEtcdStorageType `json:"type"` - - // PersistentVolume is the configuration for PersistentVolume etcd storage. 
- // With this implementation, a PersistentVolume will be allocated for every - // etcd member (either 1 or 3 depending on the HostedCluster control plane - // availability configuration). - // - // +optional - PersistentVolume *PersistentVolumeEtcdStorageSpec `json:"persistentVolume,omitempty"` - - // RestoreSnapshotURL allows an optional URL to be provided where - // an etcd snapshot can be downloaded, for example a pre-signed URL - // referencing a storage service. - // This snapshot will be restored on initial startup, only when the etcd PV - // is empty. - // - // +optional - // +immutable - // +kubebuilder:validation:XValidation:rule="self.size() <= 1", message="RestoreSnapshotURL shouldn't contain more than 1 entry" - RestoreSnapshotURL []string `json:"restoreSnapshotURL,omitempty"` -} - -// PersistentVolumeEtcdStorageSpec is the configuration for PersistentVolume -// etcd storage. -type PersistentVolumeEtcdStorageSpec struct { - // StorageClassName is the StorageClass of the data volume for each etcd member. - // - // See https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. - // - // +optional - // +immutable - StorageClassName *string `json:"storageClassName,omitempty"` - - // Size is the minimum size of the data volume for each etcd member. - // - // +optional - // +kubebuilder:default="8Gi" - // +immutable - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Etcd PV storage size is immutable" - Size *resource.Quantity `json:"size,omitempty"` -} - -// UnmanagedEtcdSpec specifies configuration which enables the control plane to -// integrate with an eternally managed etcd cluster. -type UnmanagedEtcdSpec struct { - // Endpoint is the full etcd cluster client endpoint URL. For example: - // - // https://etcd-client:2379 - // - // If the URL uses an HTTPS scheme, the TLS field is required. - // - // +kubebuilder:validation:Pattern=`^https://` - Endpoint string `json:"endpoint"` - - // TLS specifies TLS configuration for HTTPS etcd client endpoints. + // tls specifies TLS configuration for HTTPS etcd client endpoints. + //+required TLS EtcdTLSConfig `json:"tls"` } @@ -2368,169 +1323,42 @@ type KMSSpec struct { Azure *AzureKMSSpec `json:"azure,omitempty"` } -// AzureKMSSpec defines metadata about the configuration of the Azure KMS Secret Encryption provider using Azure key vault -type AzureKMSSpec struct { +// AESCBCSpec defines metadata about the AESCBC secret encryption strategy +type AESCBCSpec struct { // ActiveKey defines the active key used to encrypt new secrets - // - // +kubebuilder:validation:Required - ActiveKey AzureKMSKey `json:"activeKey"` + ActiveKey corev1.LocalObjectReference `json:"activeKey"` // BackupKey defines the old key during the rotation process so previously created // secrets can continue to be decrypted until they are all re-encrypted with the active key. // +optional - BackupKey *AzureKMSKey `json:"backupKey,omitempty"` -} - -type AzureKMSKey struct { - // KeyVaultName is the name of the keyvault. 
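To make the etcd union above concrete, a brief sketch (not part of this patch) showing a Managed etcd spec backed by a PersistentVolume alongside an Unmanaged spec pointing at an external endpoint. The StorageClass name is a placeholder, and the TLS client certificate references on the Unmanaged variant are omitted here.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	size := resource.MustParse("8Gi") // matches the API default volume size
	sc := "gp3-csi"                   // placeholder StorageClass name

	managed := hyperv1.EtcdSpec{
		ManagementType: hyperv1.Managed,
		Managed: &hyperv1.ManagedEtcdSpec{
			Storage: hyperv1.ManagedEtcdStorageSpec{
				Type: hyperv1.PersistentVolumeEtcdStorage,
				PersistentVolume: &hyperv1.PersistentVolumeEtcdStorageSpec{
					StorageClassName: &sc,
					Size:             &size,
				},
			},
		},
	}

	unmanaged := hyperv1.EtcdSpec{
		ManagementType: hyperv1.Unmanaged,
		Unmanaged: &hyperv1.UnmanagedEtcdSpec{
			Endpoint: "https://etcd-client:2379",
			// TLS client certificate references omitted in this sketch.
		},
	}

	fmt.Println(managed.ManagementType, unmanaged.Unmanaged.Endpoint)
}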
Must match criteria specified at https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name - // Your Microsoft Entra application used to create the cluster must be authorized to access this keyvault, e.g using the AzureCLI: - // `az keyvault set-policy -n $KEYVAULT_NAME --key-permissions decrypt encrypt --spn ` - KeyVaultName string `json:"keyVaultName"` - // KeyName is the name of the keyvault key used for encrypt/decrypt - KeyName string `json:"keyName"` - // KeyVersion contains the version of the key to use - KeyVersion string `json:"keyVersion"` -} - -// IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy -type IBMCloudKMSSpec struct { - // Region is the IBM Cloud region - Region string `json:"region"` - // Auth defines metadata for how authentication is done with IBM Cloud KMS - Auth IBMCloudKMSAuthSpec `json:"auth"` - // KeyList defines the list of keys used for data encryption - KeyList []IBMCloudKMSKeyEntry `json:"keyList"` -} - -// IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key -type IBMCloudKMSKeyEntry struct { - // CRKID is the customer rook key id - CRKID string `json:"crkID"` - // InstanceID is the id for the key protect instance - InstanceID string `json:"instanceID"` - // CorrelationID is an identifier used to track all api call usage from hypershift - CorrelationID string `json:"correlationID"` - // URL is the url to call key protect apis over - // +kubebuilder:validation:Pattern=`^https://` - URL string `json:"url"` - // KeyVersion is a unique number associated with the key. The number increments whenever a new - // key is enabled for data encryption. - KeyVersion int `json:"keyVersion"` -} - -// IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS -type IBMCloudKMSAuthSpec struct { - // Type defines the IBM Cloud KMS authentication strategy - // +unionDiscriminator - Type IBMCloudKMSAuthType `json:"type"` - // Unmanaged defines the auth metadata the customer provides to interact with IBM Cloud KMS - // +optional - Unmanaged *IBMCloudKMSUnmanagedAuthSpec `json:"unmanaged,omitempty"` - // Managed defines metadata around the service to service authentication strategy for the IBM Cloud - // KMS system (all provider managed). 
- // +optional - Managed *IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"` + BackupKey *corev1.LocalObjectReference `json:"backupKey,omitempty"` } -// IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy -// +kubebuilder:validation:Enum=Managed;Unmanaged -type IBMCloudKMSAuthType string +type PayloadArchType string const ( - // IBMCloudKMSManagedAuth defines the KMS authentication strategy where the IKS/ROKS platform uses - // service to service auth to call IBM Cloud KMS APIs (no customer credentials requried) - IBMCloudKMSManagedAuth IBMCloudKMSAuthType = "Managed" - // IBMCloudKMSUnmanagedAuth defines the KMS authentication strategy where a customer supplies IBM Cloud - // authentication to interact with IBM Cloud KMS APIs - IBMCloudKMSUnmanagedAuth IBMCloudKMSAuthType = "Unmanaged" + AMD64 PayloadArchType = "AMD64" + PPC64LE PayloadArchType = "PPC64LE" + S390X PayloadArchType = "S390X" + ARM64 PayloadArchType = "ARM64" + Multi PayloadArchType = "Multi" ) -// IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS -type IBMCloudKMSUnmanagedAuthSpec struct { - // Credentials should reference a secret with a key field of IBMCloudIAMAPIKeySecretKey that contains a apikey to - // call IBM Cloud KMS APIs - Credentials corev1.LocalObjectReference `json:"credentials"` -} - -// IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud -// KMS system (all provider managed). -type IBMCloudKMSManagedAuthSpec struct { -} - -// AWSKMSSpec defines metadata about the configuration of the AWS KMS Secret Encryption provider -type AWSKMSSpec struct { - // Region contains the AWS region - Region string `json:"region"` - // ActiveKey defines the active key used to encrypt new secrets - ActiveKey AWSKMSKeyEntry `json:"activeKey"` - // BackupKey defines the old key during the rotation process so previously created - // secrets can continue to be decrypted until they are all re-encrypted with the active key. - // +optional - BackupKey *AWSKMSKeyEntry `json:"backupKey,omitempty"` - // Auth defines metadata about the management of credentials used to interact with AWS KMS - Auth AWSKMSAuthSpec `json:"auth"` -} - -// AWSKMSAuthSpec defines metadata about the management of credentials used to interact and encrypt data via AWS KMS key. -type AWSKMSAuthSpec struct { - // The referenced role must have a trust relationship that allows it to be assumed via web identity. - // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html. - // Example: - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Principal": { - // "Federated": "{{ .ProviderARN }}" - // }, - // "Action": "sts:AssumeRoleWithWebIdentity", - // "Condition": { - // "StringEquals": { - // "{{ .ProviderName }}:sub": {{ .ServiceAccounts }} - // } - // } - // } - // ] - // } - // - // AWSKMSARN is an ARN value referencing a role appropriate for managing the auth via the AWS KMS key. 
- // - // The following is an example of a valid policy document: - // - // { - // "Version": "2012-10-17", - // "Statement": [ - // { - // "Effect": "Allow", - // "Action": [ - // "kms:Encrypt", - // "kms:Decrypt", - // "kms:ReEncrypt*", - // "kms:GenerateDataKey*", - // "kms:DescribeKey" - // ], - // "Resource": %q - // } - // ] - // } - AWSKMSRoleARN string `json:"awsKms"` -} - -// AWSKMSKeyEntry defines metadata to locate the encryption key in AWS -type AWSKMSKeyEntry struct { - // ARN is the Amazon Resource Name for the encryption key - // +kubebuilder:validation:Pattern=`^arn:` - ARN string `json:"arn"` -} - -// AESCBCSpec defines metadata about the AESCBC secret encryption strategy -type AESCBCSpec struct { - // ActiveKey defines the active key used to encrypt new secrets - ActiveKey corev1.LocalObjectReference `json:"activeKey"` - // BackupKey defines the old key during the rotation process so previously created - // secrets can continue to be decrypted until they are all re-encrypted with the active key. - // +optional - BackupKey *corev1.LocalObjectReference `json:"backupKey,omitempty"` +// ToPayloadArch converts a string to payloadArch. +func ToPayloadArch(arch string) PayloadArchType { + switch arch { + case "amd64", string(AMD64): + return AMD64 + case "arm64", string(ARM64): + return ARM64 + case "ppc64le", string(PPC64LE): + return PPC64LE + case "s390x", string(S390X): + return S390X + case "multi", string(Multi): + return Multi + default: + return "" + } } // HostedClusterStatus is the latest observed status of a HostedCluster. @@ -2577,6 +1405,12 @@ type HostedClusterStatus struct { // +patchStrategy=merge Conditions []metav1.Condition `json:"conditions,omitempty"` + // payloadArch represents the CPU architecture type of the HostedCluster.Spec.Release.Image. The valid values are: + // Multi, ARM64, AMD64, S390X, or PPC64LE. + // +kubebuilder:validation:Enum=Multi;ARM64;AMD64;PPC64LE;S390X + // +optional + PayloadArch PayloadArchType `json:"payloadArch,omitempty"` + // Platform contains platform-specific status of the HostedCluster // +optional Platform *PlatformStatus `json:"platform,omitempty"` @@ -2588,15 +1422,6 @@ type PlatformStatus struct { AWS *AWSPlatformStatus `json:"aws,omitempty"` } -// AWSPlatformStatus contains status specific to the AWS platform -type AWSPlatformStatus struct { - // DefaultWorkerSecurityGroupID is the ID of a security group created by - // the control plane operator. It is always added to worker machines in - // addition to any security groups specified in the NodePool. - // +optional - DefaultWorkerSecurityGroupID string `json:"defaultWorkerSecurityGroupID,omitempty"` -} - // ClusterVersionStatus reports the status of the cluster versioning, // including any upgrades that are in progress. The current field will // be set to whichever version the cluster is reconciling to, and the @@ -2675,6 +1500,9 @@ type ClusterConfiguration struct { // registries, and policies to block or allow registry hostnames. // When exposing OpenShift's image registry to the public, this also lets cluster // admins specify the external hostname. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + // TODO(alberto): elaborate why. + // +rollout // +optional Image *configv1.ImageSpec `json:"image,omitempty"` @@ -2708,6 +1536,12 @@ type ClusterConfiguration struct { Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"` // Proxy holds cluster-wide information on how to configure default proxies for the cluster. 
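The ToPayloadArch helper added above normalizes either GOARCH-style strings or the enum values themselves to a PayloadArchType, returning the empty string for anything unrecognized. A short usage sketch (not part of this patch):

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

func main() {
	fmt.Println(hyperv1.ToPayloadArch("amd64"))   // AMD64
	fmt.Println(hyperv1.ToPayloadArch("Multi"))   // Multi
	fmt.Println(hyperv1.ToPayloadArch("riscv64")) // "" (unrecognized architecture)
}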
+ // This affects traffic flowing from the hosted cluster data plane. + // The controllers will generate a machineConfig with the proxy config for the cluster. + // This MachineConfig will be part of every payload generated by the controllers for any NodePool of the HostedCluster. + // Changing this value will trigger a rollout for all existing NodePools in the cluster. + + // +rollout // +optional Proxy *configv1.ProxySpec `json:"proxy,omitempty"` } @@ -2741,7 +1575,7 @@ type HostedCluster struct { Status HostedClusterStatus `json:"status,omitempty"` } -// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // HostedClusterList contains a list of HostedCluster type HostedClusterList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go new file mode 100644 index 000000000..e69ffc431 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/ibmcloud.go @@ -0,0 +1,68 @@ +package v1beta1 + +import corev1 "k8s.io/api/core/v1" + +// IBMCloudKMSSpec defines metadata for the IBM Cloud KMS encryption strategy +type IBMCloudKMSSpec struct { + // Region is the IBM Cloud region + Region string `json:"region"` + // Auth defines metadata for how authentication is done with IBM Cloud KMS + Auth IBMCloudKMSAuthSpec `json:"auth"` + // KeyList defines the list of keys used for data encryption + KeyList []IBMCloudKMSKeyEntry `json:"keyList"` +} + +// IBMCloudKMSKeyEntry defines metadata for an IBM Cloud KMS encryption key +type IBMCloudKMSKeyEntry struct { + // CRKID is the customer rook key id + CRKID string `json:"crkID"` + // InstanceID is the id for the key protect instance + InstanceID string `json:"instanceID"` + // CorrelationID is an identifier used to track all api call usage from hypershift + CorrelationID string `json:"correlationID"` + // URL is the url to call key protect apis over + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` + // KeyVersion is a unique number associated with the key. The number increments whenever a new + // key is enabled for data encryption. + KeyVersion int `json:"keyVersion"` +} + +// IBMCloudKMSAuthSpec defines metadata for how authentication is done with IBM Cloud KMS +type IBMCloudKMSAuthSpec struct { + // Type defines the IBM Cloud KMS authentication strategy + // +unionDiscriminator + Type IBMCloudKMSAuthType `json:"type"` + // Unmanaged defines the auth metadata the customer provides to interact with IBM Cloud KMS + // +optional + Unmanaged *IBMCloudKMSUnmanagedAuthSpec `json:"unmanaged,omitempty"` + // Managed defines metadata around the service to service authentication strategy for the IBM Cloud + // KMS system (all provider managed). 
+ // +optional + Managed *IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"` +} + +// IBMCloudKMSAuthType defines the IBM Cloud KMS authentication strategy +// +kubebuilder:validation:Enum=Managed;Unmanaged +type IBMCloudKMSAuthType string + +const ( + // IBMCloudKMSManagedAuth defines the KMS authentication strategy where the IKS/ROKS platform uses + // service to service auth to call IBM Cloud KMS APIs (no customer credentials requried) + IBMCloudKMSManagedAuth IBMCloudKMSAuthType = "Managed" + // IBMCloudKMSUnmanagedAuth defines the KMS authentication strategy where a customer supplies IBM Cloud + // authentication to interact with IBM Cloud KMS APIs + IBMCloudKMSUnmanagedAuth IBMCloudKMSAuthType = "Unmanaged" +) + +// IBMCloudKMSUnmanagedAuthSpec defines the auth metadata the customer provides to interact with IBM Cloud KMS +type IBMCloudKMSUnmanagedAuthSpec struct { + // Credentials should reference a secret with a key field of IBMCloudIAMAPIKeySecretKey that contains a apikey to + // call IBM Cloud KMS APIs + Credentials corev1.LocalObjectReference `json:"credentials"` +} + +// IBMCloudKMSManagedAuthSpec defines metadata around the service to service authentication strategy for the IBM Cloud +// KMS system (all provider managed). +type IBMCloudKMSManagedAuthSpec struct { +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go new file mode 100644 index 000000000..340a27f3a --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/kubevirt.go @@ -0,0 +1,382 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +const ( + QoSClassBurstable QoSClass = "Burstable" + QoSClassGuaranteed QoSClass = "Guaranteed" +) + +type QoSClass string + +// KubevirtCompute contains values associated with the virtual compute hardware requested for the VM. +type KubevirtCompute struct { + // Memory represents how much guest memory the VM should have + // + // +optional + // +kubebuilder:default="8Gi" + Memory *resource.Quantity `json:"memory"` + + // Cores represents how many cores the guest VM should have + // + // +optional + // +kubebuilder:default=2 + Cores *uint32 `json:"cores"` + + // QosClass If set to "Guaranteed", requests the scheduler to place the VirtualMachineInstance on a node with + // limit memory and CPU, equal to be the requested values, to set the VMI as a Guaranteed QoS Class; + // See here for more details: + // https://kubevirt.io/user-guide/operations/node_overcommit/#requesting-the-right-qos-class-for-virtualmachineinstances + // + // +optional + // +kubebuilder:validation:Enum=Burstable;Guaranteed + // +kubebuilder:default=Burstable + QosClass *QoSClass `json:"qosClass,omitempty"` +} + +// +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany;ReadOnly;ReadWriteOncePod +type PersistentVolumeAccessMode corev1.PersistentVolumeAccessMode + +// KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM. +type KubevirtPersistentVolume struct { + // Size is the size of the persistent storage volume + // + // +optional + // +kubebuilder:default="32Gi" + Size *resource.Quantity `json:"size"` + // StorageClass is the storageClass used for the underlying PVC that hosts the volume + // + // +optional + StorageClass *string `json:"storageClass,omitempty"` + // AccessModes is an array that contains the desired Access Modes the root volume should have. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes + // + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // VolumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // +optional + // +kubebuilder:validation:Enum=Filesystem;Block + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` +} + +// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider +type KubevirtCachingStrategyType string + +const ( + // KubevirtCachingStrategyNone means that hypershift will not cache the boot image + KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None" + + // KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using + // a QCOW boot image, and is ignored when using a container image + KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC" +) + +// KubevirtCachingStrategy defines the boot image caching strategy +type KubevirtCachingStrategy struct { + // Type is the type of the caching strategy + // +kubebuilder:default=None + // +kubebuilder:validation:Enum=None;PVC + Type KubevirtCachingStrategyType `json:"type"` +} + +// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from. +type KubevirtRootVolume struct { + // Image represents what rhcos image to use for the node pool + // + // +optional + Image *KubevirtDiskImage `json:"diskImage,omitempty"` + + // KubevirtVolume represents of type of storage to run the image on + KubevirtVolume `json:",inline"` + + // CacheStrategy defines the boot image caching strategy. Default - no caching + // +optional + CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"` +} + +// KubevirtVolumeType is a specific supported KubeVirt volumes +// +// +kubebuilder:validation:Enum=Persistent +type KubevirtVolumeType string + +const ( + // KubevirtVolumeTypePersistent represents persistent volume for kubevirt VMs + KubevirtVolumeTypePersistent KubevirtVolumeType = "Persistent" +) + +// KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume +type KubevirtVolume struct { + // Type represents the type of storage to associate with the kubevirt VMs. + // + // +optional + // +unionDiscriminator + // +kubebuilder:default=Persistent + Type KubevirtVolumeType `json:"type"` + + // Persistent volume type means the VM's storage is backed by a PVC + // VMs that use persistent volumes can survive disruption events like restart and eviction + // This is the default type used when no storage type is defined. + // + // +optional + Persistent *KubevirtPersistentVolume `json:"persistent,omitempty"` +} + +// KubevirtDiskImage contains values representing where the rhcos image is located +type KubevirtDiskImage struct { + // ContainerDiskImage is a string representing the container image that holds the root disk + // + // +optional + ContainerDiskImage *string `json:"containerDiskImage,omitempty"` +} + +type MultiQueueSetting string + +const ( + MultiQueueEnable MultiQueueSetting = "Enable" + MultiQueueDisable MultiQueueSetting = "Disable" +) + +// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating +// on KubeVirt platform. 
+type KubevirtNodePoolPlatform struct { + // RootVolume represents values associated with the VM volume that will host rhcos + // +kubebuilder:default={persistent: {size: "32Gi"}, type: "Persistent"} + RootVolume *KubevirtRootVolume `json:"rootVolume"` + + // Compute contains values representing the virtual hardware requested for the VM + // + // +optional + // +kubebuilder:default={memory: "8Gi", cores: 2} + Compute *KubevirtCompute `json:"compute"` + + // NetworkInterfaceMultiQueue If set to "Enable", virtual network interfaces configured with a virtio bus will also + // enable the vhost multiqueue feature for network devices. The number of queues created depends on additional + // factors of the VirtualMachineInstance, like the number of guest CPUs. + // + // +optional + // +kubebuilder:validation:Enum=Enable;Disable + // +kubebuilder:default=Enable + NetworkInterfaceMultiQueue *MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` + + // AdditionalNetworks specify the extra networks attached to the nodes + // + // +optional + AdditionalNetworks []KubevirtNetwork `json:"additionalNetworks,omitempty"` + + // AttachDefaultNetwork specify if the default pod network should be attached to the nodes + // this can only be set to false if AdditionalNetworks are configured + // + // +optional + // +kubebuilder:default=true + AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + + // NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. + // Selector which must match a node's labels for the VM to be scheduled on that node. More info: + // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // KubevirtHostDevices specifies the host devices (e.g. GPU devices) to be passed + // from the management cluster, to the nodepool nodes + KubevirtHostDevices []KubevirtHostDevice `json:"hostDevices,omitempty"` +} + +// KubevirtNetwork specifies the configuration for a virtual machine +// network interface +type KubevirtNetwork struct { + // Name specify the network attached to the nodes + // it is a value with the format "[namespace]/[name]" to reference the + // multus network attachment definition + Name string `json:"name"` +} + +type KubevirtHostDevice struct { + // DeviceName is the name of the host device that is desired to be utilized in the HostedCluster's NodePool + // The device can be any supported PCI device, including GPU, either as a passthrough or a vGPU slice. + DeviceName string `json:"deviceName"` + + // Count is the number of instances the specified host device will be attached to each of the + // NodePool's nodes. Default is 1. + // + // +optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=1 + Count int `json:"count,omitempty"` +} + +// KubeVirtNodePoolStatus contains the KubeVirt platform statuses +type KubeVirtNodePoolStatus struct { + // CacheName holds the name of the cache DataVolume, if exists + // +optional + CacheName string `json:"cacheName,omitempty"` + + // Credentials shows the client credentials used when creating KubeVirt virtual machines. + // This filed is only exists when the KubeVirt virtual machines are being placed + // on a cluster separate from the one hosting the Hosted Control Plane components. + // + // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on + // the same cluster and namespace as the Hosted Control Plane. 
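As a usage illustration for the KubeVirt NodePool types above, a minimal sketch (not part of this patch) that mirrors the API defaults (8Gi memory, 2 cores, a 32Gi persistent root volume) and attaches one additional Multus network; the network attachment name is a placeholder.

package main

import (
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	memory := resource.MustParse("8Gi")
	rootSize := resource.MustParse("32Gi")
	cores := uint32(2)

	np := hyperv1.KubevirtNodePoolPlatform{
		Compute: &hyperv1.KubevirtCompute{
			Memory: &memory,
			Cores:  &cores,
		},
		RootVolume: &hyperv1.KubevirtRootVolume{
			KubevirtVolume: hyperv1.KubevirtVolume{
				Type: hyperv1.KubevirtVolumeTypePersistent,
				Persistent: &hyperv1.KubevirtPersistentVolume{
					Size: &rootSize,
				},
			},
		},
		AdditionalNetworks: []hyperv1.KubevirtNetwork{
			// "[namespace]/[name]" of a Multus NetworkAttachmentDefinition (placeholder).
			{Name: "default/example-net"},
		},
	}
	fmt.Println(np.Compute.Memory.String(), np.RootVolume.Type)
}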
+ // +optional + Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` +} +type KubevirtPlatformCredentials struct { + // InfraKubeConfigSecret is a reference to a secret that contains the kubeconfig for the external infra cluster + // that will be used to host the KubeVirt virtual machines for this cluster. + // + // +immutable + // +kubebuilder:validation:Required + // +required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraKubeConfigSecret is immutable" + InfraKubeConfigSecret *KubeconfigSecretRef `json:"infraKubeConfigSecret,omitempty"` + + // InfraNamespace defines the namespace on the external infra cluster that is used to host the KubeVirt + // virtual machines. This namespace must already exist before creating the HostedCluster and the kubeconfig + // referenced in the InfraKubeConfigSecret must have access to manage the required resources within this + // namespace. + // + // +immutable + // +kubebuilder:validation:Required + // +required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="infraNamespace is immutable" + InfraNamespace string `json:"infraNamespace"` +} + +// KubevirtPlatformSpec specifies configuration for kubevirt guest cluster installations +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.generateID) || has(self.generateID)", message="Kubevirt GenerateID is required once set" +type KubevirtPlatformSpec struct { + // BaseDomainPassthrough toggles whether or not an automatically + // generated base domain for the guest cluster should be used that + // is a subdomain of the management cluster's *.apps DNS. + // + // For the KubeVirt platform, the basedomain can be autogenerated using + // the *.apps domain of the management/infra hosting cluster + // This makes the guest cluster's base domain a subdomain of the + // hypershift infra/mgmt cluster's base domain. + // + // Example: + // Infra/Mgmt cluster's DNS + // Base: example.com + // Cluster: mgmt-cluster.example.com + // Apps: *.apps.mgmt-cluster.example.com + // KubeVirt Guest cluster's DNS + // Base: apps.mgmt-cluster.example.com + // Cluster: guest.apps.mgmt-cluster.example.com + // Apps: *.apps.guest.apps.mgmt-cluster.example.com + // + // This is possible using OCP wildcard routes + // + // +optional + // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="baseDomainPassthrough is immutable" + BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"` + + // GenerateID is used to uniquely apply a name suffix to resources associated with + // kubevirt infrastructure resources + // +kubebuilder:validation:Optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Kubevirt GenerateID is immutable once set" + // +kubebuilder:validation:MaxLength=11 + // +optional + GenerateID string `json:"generateID,omitempty"` + // Credentials defines the client credentials used when creating KubeVirt virtual machines. + // Defining credentials is only necessary when the KubeVirt virtual machines are being placed + // on a cluster separate from the one hosting the Hosted Control Plane components. + // + // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on + // the same cluster and namespace as the Hosted Control Plane. 
+ // +optional
+ Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"`
+
+ // StorageDriver defines how the KubeVirt CSI driver exposes StorageClasses on
+ // the infra cluster (hosting the VMs) to the guest cluster.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ // +immutable
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver is immutable"
+ StorageDriver *KubevirtStorageDriverSpec `json:"storageDriver,omitempty"`
+}
+
+// KubevirtStorageDriverConfigType defines how the kubevirt storage driver is configured.
+//
+// +kubebuilder:validation:Enum=None;Default;Manual
+type KubevirtStorageDriverConfigType string
+
+const (
+ // NoneKubevirtStorageDriverConfigType means no kubevirt storage driver is used
+ NoneKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "None"
+
+ // DefaultKubevirtStorageDriverConfigType means the kubevirt storage driver maps to the
+ // underlying infra cluster's default storageclass
+ DefaultKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Default"
+
+ // ManualKubevirtStorageDriverConfigType means the kubevirt storage driver mapping is
+ // explicitly defined.
+ ManualKubevirtStorageDriverConfigType KubevirtStorageDriverConfigType = "Manual"
+)
+
+type KubevirtStorageDriverSpec struct {
+ // Type represents the type of kubevirt csi driver configuration to use
+ //
+ // +unionDiscriminator
+ // +immutable
+ // +kubebuilder:default=Default
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Type is immutable"
+ Type KubevirtStorageDriverConfigType `json:"type,omitempty"`
+
+ // Manual is used to explicitly define how the infra storageclasses are
+ // mapped to guest storageclasses
+ //
+ // +immutable
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageDriver.Manual is immutable"
+ Manual *KubevirtManualStorageDriverConfig `json:"manual,omitempty"`
+}
+
+type KubevirtManualStorageDriverConfig struct {
+ // StorageClassMapping maps StorageClasses on the infra cluster hosting
+ // the KubeVirt VMs to StorageClasses that are made available within the
+ // Guest Cluster.
+ //
+ // NOTE: It is possible that not all capabilities of an infra cluster's
+ // storageclass will be present for the corresponding guest cluster's storageclass.
+ //
+ // +optional
+ // +immutable
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="storageClassMapping is immutable"
+ StorageClassMapping []KubevirtStorageClassMapping `json:"storageClassMapping,omitempty"`
+
+ // +optional
+ // +immutable
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="volumeSnapshotClassMapping is immutable"
+ VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMapping `json:"volumeSnapshotClassMapping,omitempty"`
+}
+
+type KubevirtStorageClassMapping struct {
+ // Group contains which group this mapping belongs to.
+ Group string `json:"group,omitempty"`
+ // InfraStorageClassName is the name of the infra cluster storage class that
+ // will be exposed to the guest.
+ InfraStorageClassName string `json:"infraStorageClassName"`
+ // GuestStorageClassName is the name that the corresponding storageclass will
+ // be called within the guest cluster
+ GuestStorageClassName string `json:"guestStorageClassName"`
+}
+
+type KubevirtVolumeSnapshotClassMapping struct {
+ // Group contains which group this mapping belongs to.
+ Group string `json:"group,omitempty"`
+ // InfraVolumeSnapshotClassName is the name of the infra cluster volume snapshot class that
+ // will be exposed to the guest.
+ InfraVolumeSnapshotClassName string `json:"infraVolumeSnapshotClassName"`
+ // GuestVolumeSnapshotClassName is the name that the corresponding volumeSnapshotClass will
+ // be called within the guest cluster
+ GuestVolumeSnapshotClassName string `json:"guestVolumeSnapshotClassName"`
+}
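Illustration only, not part of the vendored file: a Manual storage driver configuration maps infra StorageClasses and VolumeSnapshotClasses to the names exposed inside the guest cluster. The class names below are hypothetical and the package alias is assumed.

    package example

    import (
        hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    )

    // manualStorageDriver exposes one infra StorageClass and one infra VolumeSnapshotClass
    // to the guest cluster under guest-side names.
    func manualStorageDriver() *hyperv1.KubevirtStorageDriverSpec {
        return &hyperv1.KubevirtStorageDriverSpec{
            Type: hyperv1.ManualKubevirtStorageDriverConfigType,
            Manual: &hyperv1.KubevirtManualStorageDriverConfig{
                StorageClassMapping: []hyperv1.KubevirtStorageClassMapping{
                    {InfraStorageClassName: "ocs-storagecluster-ceph-rbd", GuestStorageClassName: "guest-rbd"},
                },
                VolumeSnapshotClassMapping: []hyperv1.KubevirtVolumeSnapshotClassMapping{
                    {InfraVolumeSnapshotClassName: "ocs-rbd-snapclass", GuestVolumeSnapshotClassName: "guest-rbd-snap"},
                },
            },
        }
    }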
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go
index 74bb53694..4a057d665 100644
--- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_conditions.go
@@ -68,6 +68,36 @@ const (
 // NodePoolClusterNetworkCIDRConflictType signals if a NodePool's machine objects are colliding with the
 // cluster network's CIDR range. This can indicate why some network functionality might be degraded.
 NodePoolClusterNetworkCIDRConflictType = "ClusterNetworkCIDRConflict"
+
+ // KubeVirtNodesLiveMigratable indicates if all (VirtualMachines) nodes of the kubevirt
+ // hosted cluster can be live migrated without experiencing a node restart
+ NodePoolKubeVirtLiveMigratableType = "KubeVirtNodesLiveMigratable"
+)
+
+// PerformanceProfile Conditions
+const (
+
+ // NodePoolPerformanceProfileTuningConditionTypePrefix is a common prefix to all PerformanceProfile
+ // status conditions reported by NTO
+ NodePoolPerformanceProfileTuningConditionTypePrefix = "performance.operator.openshift.io"
+
+ // NodePoolPerformanceProfileTuningAvailableConditionType signals that the PerformanceProfile associated with the
+ // NodePool is available and its tunings are being applied successfully.
+ NodePoolPerformanceProfileTuningAvailableConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Available"
+
+ // NodePoolPerformanceProfileTuningProgressingConditionType signals that the PerformanceProfile associated with the
+ // NodePool is in the middle of its tuning processing and is in a progressing state.
+ NodePoolPerformanceProfileTuningProgressingConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Progressing"
+
+ // NodePoolPerformanceProfileTuningUpgradeableConditionType signals that it's safe to
+ // upgrade the PerformanceProfile operator component
+ NodePoolPerformanceProfileTuningUpgradeableConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Upgradeable"
+
+ // NodePoolPerformanceProfileTuningDegradedConditionType signals that the PerformanceProfile associated with the
+ // NodePool failed to apply its tuning.
+ // This usually happens because lower-level components, like MachineConfig
+ // or KubeletConfig, failed to apply successfully.
+ NodePoolPerformanceProfileTuningDegradedConditionType = NodePoolPerformanceProfileTuningConditionTypePrefix + "/Degraded"
 )
 
 // Reasons
@@ -85,4 +115,5 @@ const (
 InvalidKubevirtMachineTemplate  = "InvalidKubevirtMachineTemplate"
 InvalidOpenStackMachineTemplate = "InvalidOpenStackMachineTemplate"
 CIDRConflictReason              = "CIDRConflict"
+ NodePoolKubeVirtLiveMigratableReason = "KubeVirtNodesNotLiveMigratable"
 )
diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go
index c85e55a37..c0f4a2c4c 100644
--- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go
+++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/nodepool_types.go
@@ -5,19 +5,19 @@ import (
 "strings"
 
 corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/util/intstr"
-
- "github.com/openshift/hypershift/api/ibmcapi"
 )
 
 const (
+ // TODO - for the next API bump; the case on these needs to be changed.
+ // See https://github.com/openshift/hypershift/pull/4538#discussion_r1765165827 for more details.
 ArchitectureAMD64   = "amd64"
 ArchitectureS390X   = "s390x"
 ArchitecturePPC64LE = "ppc64le"
 ArchitectureARM64   = "arm64"
+ ArchitectureMulti   = "multi"
 
 // NodePoolLabel is used to label Nodes.
 NodePoolLabel = "hypershift.openshift.io/nodePool"
@@ -79,44 +79,53 @@ type NodePool struct {
 
 // NodePoolSpec is the desired behavior of a NodePool.
 // +kubebuilder:validation:XValidation:rule="!has(oldSelf.arch) || has(self.arch)", message="Arch is required once set"
-// +kubebuilder:validation:XValidation:rule="self.arch != 'arm64' || has(self.platform.aws)", message="Setting Arch to arm64 is only supported for AWS"
+// +kubebuilder:validation:XValidation:rule="self.arch != 'arm64' || has(self.platform.aws) || has(self.platform.azure)", message="Setting Arch to arm64 is only supported for AWS and Azure"
 // +kubebuilder:validation:XValidation:rule="!has(self.replicas) || !has(self.autoScaling)", message="Both replicas or autoScaling should not be set"
 type NodePoolSpec struct {
- // ClusterName is the name of the HostedCluster this NodePool belongs to.
- //
- // TODO(dan): Should this be a LocalObjectReference?
- //
+ // clusterName is the name of the HostedCluster this NodePool belongs to.
+ // If a HostedCluster with this name doesn't exist, the controller will no-op until it exists.
 // +immutable
 // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="ClusterName is immutable"
+ // +required
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="clusterName must consist of lowercase alphanumeric characters or '-', start and end with an alphanumeric character, and be between 1 and 253 characters"
 ClusterName string `json:"clusterName"`
 
- // Release specifies the OCP release used for the NodePool. This informs the
- // ignition configuration for machines, as well as other platform specific
+ // release specifies the OCP release used for the NodePool. This informs the
+ // ignition configuration for machines which includes the kubelet version, as well as other platform specific
 // machine properties (e.g. an AMI on the AWS platform).
+ // It's not supported to use a release in a NodePool whose minor version skew against the Control Plane release is bigger than N-2, although there's no enforcement that prevents this from happening.
+ // Attempting to use a release with a bigger skew might result in unpredictable behavior.
+ // Attempting to use a release higher than the HostedCluster one will result in the NodePool being degraded and the ValidReleaseImage condition being false.
+ // Attempting to use a release lower than the current NodePool y-stream will result in the NodePool being degraded and the ValidReleaseImage condition being false.
+ // Changing this field will trigger a NodePool rollout.
+ // +rollout
+ // +required
 Release Release `json:"release"`
 
- // Platform specifies the underlying infrastructure provider for the NodePool
+ // platform specifies the underlying infrastructure provider for the NodePool
 // and is used to configure platform specific behavior.
- //
- // +immutable
+ // +required
 Platform NodePoolPlatform `json:"platform"`
 
- // Replicas is the desired number of nodes the pool should maintain. If
- // unset, the default value is 0.
- //
+ // replicas is the desired number of nodes the pool should maintain. If unset, the controller default value is 0.
+ // replicas is mutually exclusive with autoscaling. If autoscaling is configured, replicas must be omitted and autoscaling will control the NodePool size internally.
 // +optional
 Replicas *int32 `json:"replicas,omitempty"`
 
- // Management specifies behavior for managing nodes in the pool, such as
+ // management specifies behavior for managing nodes in the pool, such as
 // upgrade strategies and auto-repair behaviors.
+ // +required
 Management NodePoolManagement `json:"management"`
 
- // Autoscaling specifies auto-scaling behavior for the NodePool.
+ // autoscaling specifies auto-scaling behavior for the NodePool.
+ // autoscaling is mutually exclusive with replicas. If replicas is set, this field must be omitted.
 //
 // +optional
 AutoScaling *NodePoolAutoScaling `json:"autoScaling,omitempty"`
 
- // Config is a list of references to ConfigMaps containing serialized
+ // config is a list of references to ConfigMaps containing serialized
 // MachineConfig resources to be injected into the ignition configurations of
 // nodes in the NodePool. The MachineConfig API schema is defined here:
 //
@@ -124,53 +133,55 @@ type NodePoolSpec struct {
 //
 // Each ConfigMap must have a single key named "config" whose value is the YML
 // with one or more serialized machineconfiguration.openshift.io resources:
- // KubeletConfig
- // ContainerRuntimeConfig
- // MachineConfig
- // ClusterImagePolicy
- // ImageContentSourcePolicy
- // or
- // ImageDigestMirrorSet
 //
+ // * KubeletConfig
+ // * ContainerRuntimeConfig
+ // * MachineConfig
+ // * ClusterImagePolicy
+ // * ImageContentSourcePolicy
+ // * ImageDigestMirrorSet
+ //
+ // This is validated in the backend and signaled back via validMachineConfig condition.
+ // Changing this field will trigger a NodePool rollout.
+ // +rollout
 // +kubebuilder:validation:Optional
 Config []corev1.LocalObjectReference `json:"config,omitempty"`
 
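Illustration only, not part of the vendored file: the config field references ConfigMaps that each carry a single "config" key, and autoScaling replaces an explicit replica count. The object names and MachineConfig payload below are hypothetical, and the package alias is assumed.

    package example

    import (
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    )

    // machineConfigMap carries one serialized MachineConfig under the required "config" key.
    var machineConfigMap = corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Name: "custom-machineconfig", Namespace: "clusters"},
        Data: map[string]string{
            "config": `apiVersion: machineconfiguration.openshift.io/v1
    kind: MachineConfig
    metadata:
      name: 99-worker-example
    spec:
      config:
        ignition:
          version: 3.2.0`,
        },
    }

    // nodePoolSpecFragment references the ConfigMap and relies on autoscaling instead of replicas.
    func nodePoolSpecFragment() hyperv1.NodePoolSpec {
        return hyperv1.NodePoolSpec{
            ClusterName: "example",
            Config:      []corev1.LocalObjectReference{{Name: machineConfigMap.Name}},
            // Replicas is left nil: it is mutually exclusive with AutoScaling.
            AutoScaling: &hyperv1.NodePoolAutoScaling{Min: 2, Max: 6},
        }
    }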
- // NodeDrainTimeout is the maximum amount of time that the controller will spend on draining a node.
- // The default value is 0, meaning that the node can be drained without any time limitations.
- // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
- // TODO (alberto): Today changing this field will trigger a recreate rolling update, which kind of defeats
- // the purpose of the change. In future we plan to propagate this field in-place.
- // https://github.com/kubernetes-sigs/cluster-api/issues/5880 / https://github.com/kubernetes-sigs/cluster-api/pull/10589
+ // nodeDrainTimeout is the maximum amount of time that the controller will spend on retrying to drain a node until it succeeds.
+ // The default value is 0, meaning that the drain will be retried without any time limitation.
+ // Changing this field propagates in place to existing Nodes.
 // +optional
 NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
 
- // NodeVolumeDetachTimeout is the maximum amount of time that the controller will spend on detaching volumes from a node.
+ // nodeVolumeDetachTimeout is the maximum amount of time that the controller will spend on detaching volumes from a node.
 // The default value is 0, meaning that the volumes will be detached from the node without any time limitations.
- // After the timeout, the detachment of volumes that haven't been detached yet is skipped.
- // TODO (cbusse): Same comment as Alberto's for `NodeDrainTimeout`:
- // Today changing this field will trigger a recreate rolling update, which kind of defeats
- // the purpose of the change. In future we plan to propagate this field in-place.
- // https://github.com/kubernetes-sigs/cluster-api/issues/5880 / https://github.com/kubernetes-sigs/cluster-api/pull/10589
+ // After the timeout, any remaining attached volumes will be ignored and the removal of the machine will continue.
+ // Changing this field propagates in place to existing Nodes.
 // +optional
 NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"`
 
- // NodeLabels propagates a list of labels to Nodes, only once on creation.
+ // nodeLabels propagates a list of labels to Nodes, only once on creation.
 // Valid values are those in https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
 // +optional
 NodeLabels map[string]string `json:"nodeLabels,omitempty"`
 
- // Taints if specified, propagates a list of taints to Nodes, only once on creation.
+ // taints if specified, propagates a list of taints to Nodes, only once on creation.
+ // These taints are additive to the ones applied by other controllers.
+ // +kubebuilder:validation:MaxItems=50
 // +optional
 Taints []Taint `json:"taints,omitempty"`
 
- // PausedUntil is a field that can be used to pause reconciliation on a resource.
- // Either a date can be provided in RFC3339 format or a boolean. If a date is
+ // pausedUntil is a field that can be used to pause reconciliation on the NodePool controller, resulting in any change to the NodePool being ignored.
+ // Either a date can be provided in RFC3339 format or a boolean as in 'true', 'false', 'True', 'False'. If a date is
 // provided: reconciliation is paused on the resource until that date. If the boolean true is
 // provided: reconciliation is paused on the resource until the field is removed.
+ // +kubebuilder:validation:MaxLength=35
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*$') || self in ['true', 'false', 'True', 'False']`,message="PausedUntil must be a date in RFC3339 format or 'True', 'true', 'False' or 'false'"
 // +optional
 PausedUntil *string `json:"pausedUntil,omitempty"`
 
- // TuningConfig is a list of references to ConfigMaps containing serialized
+ // tuningConfig is a list of references to ConfigMaps containing serialized
 // Tuned or PerformanceProfile resources to define the tuning configuration to be applied to
 // nodes in the NodePool. The Tuned API is defined here:
 //
@@ -181,11 +192,12 @@ type NodePoolSpec struct {
 //
 // Each ConfigMap must have a single key named "tuning" whose value is the
 // JSON or YAML of a serialized Tuned or PerformanceProfile.
+ // Changing this field will trigger a NodePool rollout.
 // +kubebuilder:validation:Optional
 TuningConfig []corev1.LocalObjectReference `json:"tuningConfig,omitempty"`
 
- // Arch is the preferred processor architecture for the NodePool (currently only supported on AWS)
- // NOTE: This is set as optional to prevent validation from failing due to a limitation on client side validation with open API machinery:
+ // arch is the preferred processor architecture for the NodePool. Different platforms might have different supported architectures.
+ // TODO: This is set as optional to prevent validation from failing due to a limitation on client side validation with open API machinery:
 // https://github.com/kubernetes/kubernetes/issues/108768#issuecomment-1253912215
 // TODO Add s390x to enum validation once the architecture is supported
 //
@@ -219,8 +231,7 @@ type NodePoolStatus struct {
 }
 
 // NodePoolList contains a list of NodePools.
-//
-// +kubebuilder:object:root=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type NodePoolList struct {
 metav1.TypeMeta `json:",inline"`
 metav1.ListMeta `json:"metadata,omitempty"`
@@ -274,14 +285,16 @@ const (
 
 // ReplaceUpgrade specifies upgrade behavior that replaces existing nodes
 // according to a given strategy.
+// +kubebuilder:validation:XValidation:rule="!has(self.rollingUpdate) || self.strategy == 'RollingUpdate'", message="The 'rollingUpdate' field can only be set when 'strategy' is 'RollingUpdate'"
 type ReplaceUpgrade struct {
- // Strategy is the node replacement strategy for nodes in the pool.
- //
+ // strategy is the node replacement strategy for nodes in the pool.
+ // It can be either "RollingUpdate" or "OnDelete". RollingUpdate will roll out Nodes honoring maxSurge and maxUnavailable.
+ // OnDelete provides more granular control and will replace nodes as the old ones are manually deleted.
 // +kubebuilder:validation:Optional
 // +kubebuilder:validation:Enum=RollingUpdate;OnDelete
 Strategy UpgradeStrategy `json:"strategy"`
 
- // RollingUpdate specifies a rolling update strategy which upgrades nodes by
+ // rollingUpdate specifies a rolling update strategy which upgrades nodes by
 // creating new nodes and deleting the old ones.
 //
 // +kubebuilder:validation:Optional
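Illustration only, not part of the vendored file: a Replace upgrade with a rolling update that surges by one node and keeps all existing nodes available, mirroring the documented default. The enum string values come from the validation markers above; the package alias is assumed.

    package example

    import (
        "k8s.io/apimachinery/pkg/util/intstr"

        hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    )

    // replaceWithRollingUpdate sets maxSurge to 1 and maxUnavailable to 0.
    func replaceWithRollingUpdate() hyperv1.NodePoolManagement {
        maxSurge := intstr.FromInt(1)
        maxUnavailable := intstr.FromInt(0)
        return hyperv1.NodePoolManagement{
            UpgradeType: hyperv1.UpgradeType("Replace"),
            Replace: &hyperv1.ReplaceUpgrade{
                Strategy: hyperv1.UpgradeStrategy("RollingUpdate"),
                RollingUpdate: &hyperv1.RollingUpdate{
                    MaxSurge:       &maxSurge,
                    MaxUnavailable: &maxUnavailable,
                },
            },
        }
    }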
@@ -291,7 +304,7 @@ type ReplaceUpgrade struct {
 // RollingUpdate specifies a rolling update strategy which upgrades nodes by
 // creating new nodes and deleting the old ones.
 type RollingUpdate struct {
- // MaxUnavailable is the maximum number of nodes that can be unavailable
+ // maxUnavailable is the maximum number of nodes that can be unavailable
 // during the update.
 //
 // Value can be an absolute number (ex: 5) or a percentage of desired nodes
@@ -312,7 +325,7 @@ type RollingUpdate struct {
 // +optional
 MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
 
- // MaxSurge is the maximum number of nodes that can be provisioned above the
+ // maxSurge is the maximum number of nodes that can be provisioned above the
 // desired number of nodes.
 //
 // Value can be an absolute number (ex: 5) or a percentage of desired nodes
@@ -337,7 +350,7 @@ type RollingUpdate struct {
 // InPlaceUpgrade specifies an upgrade strategy which upgrades nodes in-place
 // without any new nodes being created or any old nodes being deleted.
 type InPlaceUpgrade struct {
- // MaxUnavailable is the maximum number of nodes that can be unavailable
+ // maxUnavailable is the maximum number of nodes that can be unavailable
 // during the update.
 //
 // Value can be an absolute number (ex: 5) or a percentage of desired nodes
@@ -359,27 +372,35 @@ type InPlaceUpgrade struct {
 
 // NodePoolManagement specifies behavior for managing nodes in a NodePool, such
 // as upgrade strategies and auto-repair behaviors.
+// +kubebuilder:validation:XValidation:rule="!has(self.inPlace) || self.upgradeType == 'InPlace'", message="The 'inPlace' field can only be set when 'upgradeType' is 'InPlace'"
 type NodePoolManagement struct {
- // UpgradeType specifies the type of strategy for handling upgrades.
+ // upgradeType specifies the type of strategy for handling upgrades.
+ // This can be either "Replace" or "InPlace".
+ // "Replace" will update Nodes by recreating the underlying instances.
+ // "InPlace" will update Nodes by applying changes to the existing instances. This might or might not result in a reboot.
 //
 // +kubebuilder:validation:Enum=Replace;InPlace
 // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="UpgradeType is immutable"
+ // +required
 UpgradeType UpgradeType `json:"upgradeType"`
 
- // Replace is the configuration for rolling upgrades.
+ // replace is the configuration for rolling upgrades.
+ // It defaults to a RollingUpdate strategy with maxSurge of 1 and maxUnavailable of 0.
 //
 // +kubebuilder:validation:Optional
 // +kubebuilder:default={strategy: "RollingUpdate", rollingUpdate: {maxSurge: 1, maxUnavailable: 0 }}
 Replace *ReplaceUpgrade `json:"replace,omitempty"`
 
- // InPlace is the configuration for in-place upgrades.
+ // inPlace is the configuration for in-place upgrades.
 //
 // +kubebuilder:validation:Optional
 InPlace *InPlaceUpgrade `json:"inPlace,omitempty"`
 
- // AutoRepair specifies whether health checks should be enabled for machines
- // in the NodePool. The default is false.
- //
+ // autoRepair specifies whether health checks should be enabled for machines in the NodePool. The default is false.
+ // Enabling this feature will cause the controller to automatically delete unhealthy machines.
+ // The unhealthy criteria are reserved for the controller implementation and subject to change,
+ // but generally they are determined by checking that the Node ready condition is true within a timeout that might vary depending on the platform provider.
+ // AutoRepair will no-op when more than 2 Nodes are unhealthy at the same time, giving time for the cluster to stabilize or for the user to manually intervene.
 // +optional
 // +kubebuilder:default=false
 AutoRepair bool `json:"autoRepair"`
@@ -388,12 +409,12 @@ type NodePoolManagement struct {
 
 // NodePoolAutoScaling specifies auto-scaling behavior for a NodePool.
// +kubebuilder:validation:XValidation:rule="self.max >= self.min", message="max must be equal or greater than min" type NodePoolAutoScaling struct { - // Min is the minimum number of nodes to maintain in the pool. Must be >= 1. + // Min is the minimum number of nodes to maintain in the pool. Must be >= 1 and <= .Max. // // +kubebuilder:validation:Minimum=1 Min int32 `json:"min"` - // Max is the maximum number of nodes allowed in the pool. Must be >= 1. + // Max is the maximum number of nodes allowed in the pool. Must be >= 1 and >= Min. // // +kubebuilder:validation:Minimum=1 Max int32 `json:"max"` @@ -407,6 +428,8 @@ type NodePoolPlatform struct { // +unionDiscriminator // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Type is immutable" // +immutable + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None + // +openshift:validation:FeatureGateAwareEnum:featureGate=OpenStack,enum=AWS;Azure;IBMCloud;KubeVirt;Agent;PowerVS;None;OpenStack Type PlatformType `json:"type"` // AWS specifies the configuration used when operating on AWS. @@ -435,568 +458,11 @@ type NodePoolPlatform struct { PowerVS *PowerVSNodePoolPlatform `json:"powervs,omitempty"` // OpenStack specifies the configuration used when using OpenStack platform. - // // +optional + // +openshift:enable:FeatureGate=OpenStack OpenStack *OpenStackNodePoolPlatform `json:"openstack,omitempty"` } -// PowerVSNodePoolProcType defines processor type to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolProcType string - -func (p *PowerVSNodePoolProcType) String() string { - return string(*p) -} - -func (p *PowerVSNodePoolProcType) Set(s string) error { - switch s { - case string(PowerVSNodePoolSharedProcType), string(PowerVSNodePoolCappedProcType), string(PowerVSNodePoolDedicatedProcType): - *p = PowerVSNodePoolProcType(s) - return nil - default: - return fmt.Errorf("unknown processor type used %s", s) - } -} - -func (p *PowerVSNodePoolProcType) Type() string { - return "PowerVSNodePoolProcType" -} - -const ( - // PowerVSNodePoolDedicatedProcType defines dedicated processor type - PowerVSNodePoolDedicatedProcType = PowerVSNodePoolProcType("dedicated") - - // PowerVSNodePoolSharedProcType defines shared processor type - PowerVSNodePoolSharedProcType = PowerVSNodePoolProcType("shared") - - // PowerVSNodePoolCappedProcType defines capped processor type - PowerVSNodePoolCappedProcType = PowerVSNodePoolProcType("capped") -) - -func (p *PowerVSNodePoolProcType) CastToCAPIPowerVSProcessorType() ibmcapi.PowerVSProcessorType { - switch *p { - case PowerVSNodePoolDedicatedProcType: - return ibmcapi.PowerVSProcessorTypeDedicated - case PowerVSNodePoolCappedProcType: - return ibmcapi.PowerVSProcessorTypeCapped - default: - return ibmcapi.PowerVSProcessorTypeShared - } -} - -// PowerVSNodePoolStorageType defines storage type to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolStorageType string - -// PowerVSNodePoolImageDeletePolicy defines image delete policy to be used for PowerVSNodePoolPlatform -type PowerVSNodePoolImageDeletePolicy string - -// PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating -// on IBMCloud PowerVS platform. -type PowerVSNodePoolPlatform struct { - // SystemType is the System type used to host the instance. - // systemType determines the number of cores and memory that is available. - // Few of the supported SystemTypes are s922,e880,e980. - // e880 systemType available only in Dallas Datacenters. 
- // e980 systemType available in Datacenters except Dallas and Washington. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The current default is s922 which is generally available. - // - // +optional - // +kubebuilder:default=s922 - SystemType string `json:"systemType,omitempty"` - - // ProcessorType is the VM instance processor type. - // It must be set to one of the following values: Dedicated, Capped or Shared. - // - // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. - // Shared: Shared among other clients. - // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. - // - // if the processorType is selected as Dedicated, then Processors value cannot be fractional. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The current default is shared. - // - // +kubebuilder:default=shared - // +kubebuilder:validation:Enum=dedicated;shared;capped - // +optional - ProcessorType PowerVSNodePoolProcType `json:"processorType,omitempty"` - - // Processors is the number of virtual processors in a virtual machine. - // when the processorType is selected as Dedicated the processors value cannot be fractional. - // maximum value for the Processors depends on the selected SystemType. - // when SystemType is set to e880 or e980 maximum Processors value is 143. - // when SystemType is set to s922 maximum Processors value is 15. - // minimum value for Processors depends on the selected ProcessorType. - // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. - // when ProcessorType is set as Dedicated, The minimum processors is 1. - // When omitted, this means that the user has no opinion and the platform is left to choose a - // reasonable default. The default is set based on the selected ProcessorType. - // when ProcessorType selected as Dedicated, the default is set to 1. - // when ProcessorType selected as Shared or Capped, the default is set to 0.5. - // - // +optional - // +kubebuilder:default="0.5" - Processors intstr.IntOrString `json:"processors,omitempty"` - - // MemoryGiB is the size of a virtual machine's memory, in GiB. - // maximum value for the MemoryGiB depends on the selected SystemType. - // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. - // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. - // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. - // The minimum memory is 32 GiB. - // - // When omitted, this means the user has no opinion and the platform is left to choose a reasonable - // default. The current default is 32. - // - // +optional - // +kubebuilder:default=32 - MemoryGiB int32 `json:"memoryGiB,omitempty"` - - // Image used for deploying the nodes. If unspecified, the default - // is chosen based on the NodePool release payload image. - // - // +optional - Image *PowerVSResourceReference `json:"image,omitempty"` - - // StorageType for the image and nodes, this will be ignored if Image is specified. - // The storage tiers in PowerVS are based on I/O operations per second (IOPS). - // It means that the performance of your storage volumes is limited to the maximum number of IOPS based on volume size and storage tier. 
- // Although, the exact numbers might change over time, the Tier 3 storage is currently set to 3 IOPS/GB, and the Tier 1 storage is currently set to 10 IOPS/GB. - // - // The default is tier1 - // - // +kubebuilder:default=tier1 - // +kubebuilder:validation:Enum=tier1;tier3 - // +optional - StorageType PowerVSNodePoolStorageType `json:"storageType,omitempty"` - - // ImageDeletePolicy is policy for the image deletion. - // - // delete: delete the image from the infrastructure. - // retain: delete the image from the openshift but retain in the infrastructure. - // - // The default is delete - // - // +kubebuilder:default=delete - // +kubebuilder:validation:Enum=delete;retain - // +optional - ImageDeletePolicy PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"` -} - -type QoSClass string - -const ( - QoSClassBurstable QoSClass = "Burstable" - QoSClassGuaranteed QoSClass = "Guaranteed" -) - -// KubevirtCompute contains values associated with the virtual compute hardware requested for the VM. -type KubevirtCompute struct { - // Memory represents how much guest memory the VM should have - // - // +optional - // +kubebuilder:default="8Gi" - Memory *resource.Quantity `json:"memory"` - - // Cores represents how many cores the guest VM should have - // - // +optional - // +kubebuilder:default=2 - Cores *uint32 `json:"cores"` - - // QosClass If set to "Guaranteed", requests the scheduler to place the VirtualMachineInstance on a node with - // limit memory and CPU, equal to be the requested values, to set the VMI as a Guaranteed QoS Class; - // See here for more details: - // https://kubevirt.io/user-guide/operations/node_overcommit/#requesting-the-right-qos-class-for-virtualmachineinstances - // - // +optional - // +kubebuilder:validation:Enum=Burstable;Guaranteed - // +kubebuilder:default=Burstable - QosClass *QoSClass `json:"qosClass,omitempty"` -} - -// +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany;ReadOnly;ReadWriteOncePod -type PersistentVolumeAccessMode corev1.PersistentVolumeAccessMode - -// KubevirtPersistentVolume contains the values involved with provisioning persistent storage for a KubeVirt VM. -type KubevirtPersistentVolume struct { - // Size is the size of the persistent storage volume - // - // +optional - // +kubebuilder:default="32Gi" - Size *resource.Quantity `json:"size"` - // StorageClass is the storageClass used for the underlying PVC that hosts the volume - // - // +optional - StorageClass *string `json:"storageClass,omitempty"` - // AccessModes is an array that contains the desired Access Modes the root volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes - // - // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` - // VolumeMode defines what type of volume is required by the claim. - // Value of Filesystem is implied when not included in claim spec. 
- // +optional - // +kubebuilder:validation:Enum=Filesystem;Block - VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` -} - -// KubevirtCachingStrategyType is the type of the boot image caching mechanism for the KubeVirt provider -type KubevirtCachingStrategyType string - -const ( - // KubevirtCachingStrategyNone means that hypershift will not cache the boot image - KubevirtCachingStrategyNone KubevirtCachingStrategyType = "None" - - // KubevirtCachingStrategyPVC means that hypershift will cache the boot image into a PVC; only relevant when using - // a QCOW boot image, and is ignored when using a container image - KubevirtCachingStrategyPVC KubevirtCachingStrategyType = "PVC" -) - -// KubevirtCachingStrategy defines the boot image caching strategy -type KubevirtCachingStrategy struct { - // Type is the type of the caching strategy - // +kubebuilder:default=None - // +kubebuilder:validation:Enum=None;PVC - Type KubevirtCachingStrategyType `json:"type"` -} - -// KubevirtRootVolume represents the volume that the rhcos disk will be stored and run from. -type KubevirtRootVolume struct { - // Image represents what rhcos image to use for the node pool - // - // +optional - Image *KubevirtDiskImage `json:"diskImage,omitempty"` - - // KubevirtVolume represents of type of storage to run the image on - KubevirtVolume `json:",inline"` - - // CacheStrategy defines the boot image caching strategy. Default - no caching - // +optional - CacheStrategy *KubevirtCachingStrategy `json:"cacheStrategy,omitempty"` -} - -// KubevirtVolumeType is a specific supported KubeVirt volumes -// -// +kubebuilder:validation:Enum=Persistent -type KubevirtVolumeType string - -const ( - // KubevirtVolumeTypePersistent represents persistent volume for kubevirt VMs - KubevirtVolumeTypePersistent KubevirtVolumeType = "Persistent" -) - -// KubevirtVolume represents what kind of storage to use for a KubeVirt VM volume -type KubevirtVolume struct { - // Type represents the type of storage to associate with the kubevirt VMs. - // - // +optional - // +unionDiscriminator - // +kubebuilder:default=Persistent - Type KubevirtVolumeType `json:"type"` - - // Persistent volume type means the VM's storage is backed by a PVC - // VMs that use persistent volumes can survive disruption events like restart and eviction - // This is the default type used when no storage type is defined. - // - // +optional - Persistent *KubevirtPersistentVolume `json:"persistent,omitempty"` -} - -// KubevirtDiskImage contains values representing where the rhcos image is located -type KubevirtDiskImage struct { - // ContainerDiskImage is a string representing the container image that holds the root disk - // - // +optional - ContainerDiskImage *string `json:"containerDiskImage,omitempty"` -} - -type MultiQueueSetting string - -const ( - MultiQueueEnable MultiQueueSetting = "Enable" - MultiQueueDisable MultiQueueSetting = "Disable" -) - -// KubevirtNodePoolPlatform specifies the configuration of a NodePool when operating -// on KubeVirt platform. 
-type KubevirtNodePoolPlatform struct { - // RootVolume represents values associated with the VM volume that will host rhcos - // +kubebuilder:default={persistent: {size: "32Gi"}, type: "Persistent"} - RootVolume *KubevirtRootVolume `json:"rootVolume"` - - // Compute contains values representing the virtual hardware requested for the VM - // - // +optional - // +kubebuilder:default={memory: "8Gi", cores: 2} - Compute *KubevirtCompute `json:"compute"` - - // NetworkInterfaceMultiQueue If set to "Enable", virtual network interfaces configured with a virtio bus will also - // enable the vhost multiqueue feature for network devices. The number of queues created depends on additional - // factors of the VirtualMachineInstance, like the number of guest CPUs. - // - // +optional - // +kubebuilder:validation:Enum=Enable;Disable - // +kubebuilder:default=Enable - NetworkInterfaceMultiQueue *MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` - - // AdditionalNetworks specify the extra networks attached to the nodes - // - // +optional - AdditionalNetworks []KubevirtNetwork `json:"additionalNetworks,omitempty"` - - // AttachDefaultNetwork specify if the default pod network should be attached to the nodes - // this can only be set to false if AdditionalNetworks are configured - // - // +optional - // +kubebuilder:default=true - AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` - - // NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. - // Selector which must match a node's labels for the VM to be scheduled on that node. More info: - // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // KubevirtHostDevices specifies the host devices (e.g. GPU devices) to be passed - // from the management cluster, to the nodepool nodes - KubevirtHostDevices []KubevirtHostDevice `json:"hostDevices,omitempty"` -} - -// KubevirtNetwork specifies the configuration for a virtual machine -// network interface -type KubevirtNetwork struct { - // Name specify the network attached to the nodes - // it is a value with the format "[namespace]/[name]" to reference the - // multus network attachment definition - Name string `json:"name"` -} - -type KubevirtHostDevice struct { - // DeviceName is the name of the host device that is desired to be utilized in the HostedCluster's NodePool - // The device can be any supported PCI device, including GPU, either as a passthrough or a vGPU slice. - DeviceName string `json:"deviceName"` - - // Count is the number of instances the specified host device will be attached to each of the - // NodePool's nodes. Default is 1. - // - // +optional - // +kubebuilder:default=1 - // +kubebuilder:validation:Minimum=1 - Count int `json:"count,omitempty"` -} - -// AWSNodePoolPlatform specifies the configuration of a NodePool when operating -// on AWS. -type AWSNodePoolPlatform struct { - // InstanceType is an ec2 instance type for node instances (e.g. m5.large). - InstanceType string `json:"instanceType"` - - // InstanceProfile is the AWS EC2 instance profile, which is a container for an IAM role that the EC2 instance uses. - InstanceProfile string `json:"instanceProfile,omitempty"` - - // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id.startsWith('subnet-') ? 
!has(self.filters) : size(self.filters) > 0", message="subnet is invalid, a valid subnet id or filters must be set, but not both" - // +kubebuilder:validation:Required - // - // Subnet is the subnet to use for node instances. - Subnet AWSResourceReference `json:"subnet"` - - // AMI is the image id to use for node instances. If unspecified, the default - // is chosen based on the NodePool release payload image. - // - // +optional - AMI string `json:"ami,omitempty"` - - // SecurityGroups is an optional set of security groups to associate with node - // instances. - // - // +optional - SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` - - // RootVolume specifies configuration for the root volume of node instances. - // - // +optional - RootVolume *Volume `json:"rootVolume,omitempty"` - - // ResourceTags is an optional list of additional tags to apply to AWS node - // instances. - // - // These will be merged with HostedCluster scoped tags, and HostedCluster tags - // take precedence in case of conflicts. - // - // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for - // information on tagging AWS resources. AWS supports a maximum of 50 tags per - // resource. OpenShift reserves 25 tags for its use, leaving 25 tags available - // for the user. - // - // +kubebuilder:validation:MaxItems=25 - // +optional - ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` -} - -// AWSResourceReference is a reference to a specific AWS resource by ID or filters. -// Only one of ID or Filters may be specified. Specifying more than one will result in -// a validation error. -type AWSResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - - // Filters is a set of key/value pairs used to identify a resource - // They are applied according to the rules defined by the AWS API: - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html - // +optional - Filters []Filter `json:"filters,omitempty"` -} - -// Filter is a filter used to identify an AWS resource -type Filter struct { - // Name of the filter. Filter names are case-sensitive. - Name string `json:"name"` - - // Values includes one or more filter values. Filter values are case-sensitive. - Values []string `json:"values"` -} - -// Volume specifies the configuration options for node instance storage devices. -type Volume struct { - // Size specifies size (in Gi) of the storage device. - // - // Must be greater than the image snapshot size or 8 (whichever is greater). - // - // +kubebuilder:validation:Minimum=8 - Size int64 `json:"size"` - - // Type is the type of the volume. - Type string `json:"type"` - - // IOPS is the number of IOPS requested for the disk. This is only valid - // for type io1. - // - // +optional - IOPS int64 `json:"iops,omitempty"` - - // Encrypted is whether the volume should be encrypted or not. - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Encrypted is immutable" - Encrypted *bool `json:"encrypted,omitempty"` - - // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. - // If Encrypted is set and this is omitted, the default AWS key will be used. - // The key must already exist and be accessible by the controller. - // +optional - EncryptionKey string `json:"encryptionKey,omitempty"` -} - -// AgentNodePoolPlatform specifies the configuration of a NodePool when operating -// on the Agent platform. 
-type AgentNodePoolPlatform struct { - // AgentLabelSelector contains labels that must be set on an Agent in order to - // be selected for a Machine. - // +optional - AgentLabelSelector *metav1.LabelSelector `json:"agentLabelSelector,omitempty"` -} - -type OpenStackNodePoolPlatform struct { - // Flavor is the OpenStack flavor to use for the node instances. - // - // +kubebuilder:validation:Required - // +required - Flavor string `json:"flavor"` - - // ImageName is the OpenStack Glance image name to use for node instances. If unspecified, the default - // is chosen based on the NodePool release payload image. - // - // +optional - ImageName string `json:"imageName,omitempty"` -} - -type AzureNodePoolPlatform struct { - // VMSize is the Azure VM instance type to use for the nodes being created in the nodepool. - // - // +kubebuilder:validation:Required - // +required - VMSize string `json:"vmsize"` - - // ImageID is the id of the image to boot from. If unset, the default image at the location below will be used and - // is expected to exist: subscription//resourceGroups//providers/Microsoft.Compute/images/rhcos.x86_64.vhd. - // The and the are expected to be the same resource group documented in the - // Hosted Cluster specification respectively, HostedCluster.Spec.Platform.Azure.SubscriptionID and - // HostedCluster.Spec.Platform.Azure.ResourceGroupName. - // - // +optional - ImageID string `json:"imageID,omitempty"` - - // DiskSizeGB is the size in GB to assign to the OS disk - // CAPZ default is 30GB, https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/b3708019a67ff19407b87d63c402af94ca4246f6/api/v1beta1/types.go#L599 - // - // +kubebuilder:default:=30 - // +kubebuilder:validation:Minimum=16 - // +optional - DiskSizeGB int32 `json:"diskSizeGB,omitempty"` - - // DiskStorageAccountType is the disk storage account type to use. Valid values are: - // * Standard_LRS: HDD - // * StandardSSD_LRS: Standard SSD - // * Premium_LRS: Premium SDD - // * UltraSSD_LRS: Ultra SDD - // - // Defaults to Premium_LRS. For more details, visit the Azure documentation: - // https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types#disk-type-comparison - // - // +kubebuilder:default:=Premium_LRS - // +kubebuilder:validation:Enum=Standard_LRS;StandardSSD_LRS;Premium_LRS;UltraSSD_LRS - // +optional - DiskStorageAccountType string `json:"diskStorageAccountType,omitempty"` - - // AvailabilityZone is the failure domain identifier where the VM should be attached to. This must not be specified - // for clusters in a location that does not support AvailabilityZone. - // - // +optional - AvailabilityZone string `json:"availabilityZone,omitempty"` - - // DiskEncryptionSetID is the ID of the DiskEncryptionSet resource to use to encrypt the OS disks for the VMs. This - // needs to exist in the same subscription id listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.SubscriptionID. - // DiskEncryptionSetID should also exist in a resource group under the same subscription id and the same location - // listed in the Hosted Cluster, HostedCluster.Spec.Platform.Azure.Location. - // - // +optional - DiskEncryptionSetID string `json:"diskEncryptionSetID,omitempty"` - - // EnableEphemeralOSDisk is a flag when set to true, will enable ephemeral OS disk. - // - // +optional - EnableEphemeralOSDisk bool `json:"enableEphemeralOSDisk,omitempty"` - - // SubnetID is the subnet ID of an existing subnet where the nodes in the nodepool will be created. 
This can be a - // different subnet than the one listed in the HostedCluster, HostedCluster.Spec.Platform.Azure.SubnetID, but must - // exist in the same HostedCluster.Spec.Platform.Azure.VnetID and must exist under the same subscription ID, - // HostedCluster.Spec.Platform.Azure.SubscriptionID. - // - // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="SubnetID is immutable" - // +kubebuilder:validation:Required - // +immutable - // +required - SubnetID string `json:"subnetID"` - - // Diagnostics specifies the diagnostics settings for a virtual machine. - // If not specified, then Boot diagnostics will be disabled. - // +optional - Diagnostics *Diagnostics `json:"diagnostics,omitempty"` - - // MachineIdentityID is a user-assigned identity assigned to the VMs used to authenticate with Azure services. This - // field is expected to exist under the same resource group as HostedCluster.Spec.Platform.Azure.ResourceGroupName. This - // user assigned identity is expected to have the Contributor role assigned to it and scoped to the resource group - // under HostedCluster.Spec.Platform.Azure.ResourceGroupName. - // - // If this field is not supplied, the Service Principal credentials will be written to a file on the disk of each VM - // in order to be accessible by the cloud provider; the aforementioned credentials provided are the same ones as - // HostedCluster.Spec.Platform.Azure.Credentials. However, this is less secure than using a managed identity. - // - // +optional - MachineIdentityID string `json:"machineIdentityID,omitempty"` -} - // We define our own condition type since metav1.Condition has validation // for Reason that might be broken by what we bubble up from CAPI. // NodePoolCondition defines an observation of NodePool resource operational state. @@ -1035,60 +501,34 @@ type NodePoolCondition struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` } -// NodePoolPlatformStatus contains specific platform statuses -type NodePoolPlatformStatus struct { - // KubeVirt contains the KubeVirt platform statuses - // +optional - KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"` -} - -// KubeVirtNodePoolStatus contains the KubeVirt platform statuses -type KubeVirtNodePoolStatus struct { - // CacheName holds the name of the cache DataVolume, if exists - // +optional - CacheName string `json:"cacheName,omitempty"` - - // Credentials shows the client credentials used when creating KubeVirt virtual machines. - // This filed is only exists when the KubeVirt virtual machines are being placed - // on a cluster separate from the one hosting the Hosted Control Plane components. - // - // The default behavior when Credentials is not defined is for the KubeVirt VMs to be placed on - // the same cluster and namespace as the Hosted Control Plane. - // +optional - Credentials *KubevirtPlatformCredentials `json:"credentials,omitempty"` -} - -// Taint is as v1 Core but without TimeAdded. +// taint is as v1 Core but without TimeAdded. // https://github.com/kubernetes/kubernetes/blob/ed8cad1e80d096257921908a52ac69cf1f41a098/staging/src/k8s.io/api/core/v1/types.go#L3037-L3053 +// Validation replicates the same validation as the upstream https://github.com/kubernetes/kubernetes/blob/9a2a7537f035969a68e432b4cc276dbce8ce1735/pkg/util/taints/taints.go#L273. +// See also https://kubernetes.io/docs/concepts/overview/working-with-objects/names/. type Taint struct { - // Required. The taint key to be applied to a node. + // key is the taint key to be applied to a node. 
+ // +required + // +kubebuilder:validation:XValidation:rule=`self.matches('^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\\/)?[A-Za-z0-9]([-A-Za-z0-9_.]{0,61}[A-Za-z0-9])?$')`,message="key must be a qualified name with an optional subdomain prefix e.g. example.com/MyName" + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 Key string `json:"key"` - // The taint value corresponding to the taint key. + + // value is the taint value corresponding to the taint key. // +optional - // +kubebuilder:validation:Pattern:=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$` + // +kubebuilder:validation:XValidation:rule=`self.matches('^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$')`,message="Value must start and end with alphanumeric characters and can only contain '-', '_', '.' in the middle" + // +kubebuilder:validation:MaxLength=253 Value string `json:"value,omitempty"` - // Required. The effect of the taint on pods + // +required + // effect is the effect of the taint on pods // that do not tolerate the taint. // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + // +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute Effect corev1.TaintEffect `json:"effect"` } -// Diagnostics specifies the diagnostics settings for a virtual machine. -// +kubebuilder:validation:XValidation:rule="self.storageAccountType == 'UserManaged' ? has(self.storageAccountURI) : true", message="storageAccountURI is required when storageAccountType is UserManaged" -type Diagnostics struct { - // StorageAccountType determines if the storage account for storing the diagnostics data - // should be disabled (Disabled), provisioned by Azure (Managed) or by the user (UserManaged). - // +kubebuilder:validation:Enum=Managed;UserManaged;Disabled - // +kubebuilder:default:=Disabled - StorageAccountType string `json:"storageAccountType,omitempty"` - // StorageAccountURI is the URI of the user-managed storage account. - // The URI typically will be `https://.blob.core.windows.net/` - // but may differ if you are using Azure DNS zone endpoints. - // You can find the correct endpoint by looking for the Blob Primary Endpoint in the - // endpoints tab in the Azure console or with the CLI by issuing - // `az storage account list --query='[].{name: name, "resource group": resourceGroup, "blob endpoint": primaryEndpoints.blob}'`. - // +kubebuilder:validation:Format=uri - // +kubebuilder:validation:MaxLength=1024 +// NodePoolPlatformStatus contains specific platform statuses +type NodePoolPlatformStatus struct { + // KubeVirt contains the KubeVirt platform statuses // +optional - StorageAccountURI string `json:"storageAccountURI,omitempty"` + KubeVirt *KubeVirtNodePoolStatus `json:"kubeVirt,omitempty"` } diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go new file mode 100644 index 000000000..a0f658a04 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/openstack.go @@ -0,0 +1,420 @@ +package v1beta1 + +// PortSecurityPolicy defines whether or not to enable port security on a port. +type PortSecurityPolicy string + +const ( + // PortSecurityEnabled enables port security on a port. + PortSecurityEnabled PortSecurityPolicy = "Enabled" + + // PortSecurityDisabled disables port security on a port. + PortSecurityDisabled PortSecurityPolicy = "Disabled" + + // PortSecurityDefault uses the default port security policy. 
+ PortSecurityDefault PortSecurityPolicy = "" +) + +type OpenStackNodePoolPlatform struct { + // Flavor is the OpenStack flavor to use for the node instances. + // + // +kubebuilder:validation:Required + // +required + Flavor string `json:"flavor"` + + // ImageName is the OpenStack Glance image name to use for node instances. If unspecified, the default + // is chosen based on the NodePool release payload image. + // + // +optional + ImageName string `json:"imageName,omitempty"` + + // availabilityZone is the nova availability zone in which the provider will create the VM. + // If not specified, the VM will be created in the default availability zone specified in the nova configuration. + // Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances + // are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs + // to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. + // The maximum length of availability zone name is 63 as per labels limits. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[^: ]*$` + // +kubebuilder:validation:MaxLength=63 + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // AdditionalPorts is a list of additional ports to create on the node instances. + // + // +optional + AdditionalPorts []PortSpec `json:"additionalPorts,omitempty"` +} + +// OpenStackPlatformSpec specifies configuration for clusters running on OpenStack. +type OpenStackPlatformSpec struct { + // IdentityRef is a reference to a secret holding OpenStack credentials + // to be used when reconciling the hosted cluster. + // + // +kubebuilder:validation:Required + // +required + IdentityRef OpenStackIdentityReference `json:"identityRef"` + + // ManagedSubnets describe the OpenStack Subnet to be created. Cluster actuator will create a network, + // and a subnet with the defined DNSNameservers, AllocationPools and the CIDR defined in the HostedCluster + // MachineNetwork, and a router connected to the subnet. Currently only one IPv4 + // subnet is supported. + // + // +kubebuilder:validation:MaxItems=1 + // +listType=atomic + // +optional + ManagedSubnets []SubnetSpec `json:"managedSubnets,omitempty"` + + // Router specifies an existing router to be used if ManagedSubnets are + // specified. If specified, no new router will be created. + // + // +optional + Router *RouterParam `json:"router,omitempty"` + + // Network specifies an existing network to use if no ManagedSubnets + // are specified. + // +optional + Network *NetworkParam `json:"network,omitempty"` + + // Subnets specifies existing subnets to use if not ManagedSubnets are + // specified. All subnets must be in the network specified by Network. + // There can be zero, one, or two subnets. If no subnets are specified, + // all subnets in Network will be used. If 2 subnets are specified, one + // must be IPv4 and the other IPv6. + // + // +kubebuilder:validation:MaxItems=2 + // +listType=atomic + // +optional + Subnets []SubnetParam `json:"subnets,omitempty"` + + // NetworkMTU sets the maximum transmission unit (MTU) value to address fragmentation for the private network ID. + // This value will be used only if the Cluster actuator creates the network. + // If left empty, the network will have the default MTU defined in Openstack network service. 
+ // To use this field, the Openstack installation requires the net-mtu neutron API extension. + // + // +optional + NetworkMTU *int `json:"networkMTU,omitempty"` + + // ExternalNetwork is the OpenStack Network to be used to get public internet to the VMs. + // This option is ignored if DisableExternalNetwork is set to true. + // + // If ExternalNetwork is defined it must refer to exactly one external network. + // + // If ExternalNetwork is not defined or is empty the controller will use any + // existing external network as long as there is only one. It is an + // error if ExternalNetwork is not defined and there are multiple + // external networks unless DisableExternalNetwork is also set. + // + // If ExternalNetwork is not defined and there are no external networks + // the controller will proceed as though DisableExternalNetwork was set. + // + // +optional + ExternalNetwork *NetworkParam `json:"externalNetwork,omitempty"` + + // DisableExternalNetwork specifies whether or not to attempt to connect the cluster + // to an external network. This allows for the creation of clusters when connecting + // to an external network is not possible or desirable, e.g. if using a provider network. + // + // +optional + DisableExternalNetwork *bool `json:"disableExternalNetwork,omitempty"` + + // Tags to set on all resources in cluster which support tags + // + // +listType=set + // +optional + Tags []string `json:"tags,omitempty"` + + // IngressFloatingIP is an available floating IP in your OpenStack cluster that will + // be associated with the OpenShift ingress port. + // When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. + // When specified, the floating IP has to be pre-created. If the + // specified value is not a floating IP or is already claimed, the + // OpenStack cloud provider won't be able to provision the load + // balancer. + // This value must be a valid IPv4 or IPv6 address. + // + // +kubebuilder:validation:XValidation:rule="isIP(self)",message="floatingIP must be a valid IPv4 or IPv6 address" + // +optional + IngressFloatingIP string `json:"ingressFloatingIP,omitempty"` +} + +// OpenStackIdentityReference is a reference to an infrastructure +// provider identity to be used to provision cluster resources. +type OpenStackIdentityReference struct { + // Name is the name of a secret in the same namespace as the resource being provisioned. + // The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file. + // The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate. + // + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // CloudName specifies the name of the entry in the clouds.yaml file to use. + // + // +kubebuilder:validation:Required + // +required + CloudName string `json:"cloudName"` +} + +type SubnetSpec struct { + // DNSNameservers holds a list of DNS server addresses that will be provided when creating + // the subnet. These addresses need to have the same IP version as CIDR. + // + // +optional + DNSNameservers []string `json:"dnsNameservers,omitempty"` + + // AllocationPools is an array of AllocationPool objects that will be applied to OpenStack Subnet being created. + // If set, OpenStack will only allocate these IPs for Machines. It will still be possible to create ports from + // outside of these ranges manually. 
+ // + // +optional + AllocationPools []AllocationPool `json:"allocationPools,omitempty"` +} + +type AllocationPool struct { + // Start represents the start of the AllocationPool, that is the lowest IP of the pool. + // + // +kubebuilder:validation:Required + // +required + Start string `json:"start"` + + // End represents the end of the AlloctionPool, that is the highest IP of the pool. + // + // +kubebuilder:validation:Required + // +required + End string `json:"end"` +} + +// RouterParam specifies an OpenStack router to use. It may be specified by either ID or filter, but not both. +// +kubebuilder:validation:MaxProperties:=1 +// +kubebuilder:validation:MinProperties:=1 +type RouterParam struct { + // ID is the ID of the router to use. If ID is provided, the other filters cannot be provided. Must be in UUID format. + // + // +kubebuilder:validation:Format:=uuid + // +optional + ID *string `json:"id,omitempty"` + + // Filter specifies a filter to select an OpenStack router. If provided, cannot be empty. + // + // +optional + Filter *RouterFilter `json:"filter,omitempty"` +} + +// RouterFilter specifies a query to select an OpenStack router. At least one property must be set. +// +kubebuilder:validation:MinProperties:=1 +type RouterFilter struct { + // Name is the name of the router to filter by. + // + // +optional + Name string `json:"name,omitempty"` + + // Description is the description of the router to filter by. + // + // +optional + Description string `json:"description,omitempty"` + + // ProjectID is the project ID of the router to filter by. + // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// NetworkParam specifies an OpenStack network. It may be specified by either ID or Filter, but not both. +// +kubebuilder:validation:MaxProperties:=1 +// +kubebuilder:validation:MinProperties:=1 +type NetworkParam struct { + // ID is the ID of the network to use. If ID is provided, the other filters cannot be provided. Must be in UUID format. + // + // +kubebuilder:validation:Format:=uuid + // +optional + ID *string `json:"id,omitempty"` + + // Filter specifies a filter to select an OpenStack network. If provided, cannot be empty. + // + // +optional + Filter *NetworkFilter `json:"filter,omitempty"` +} + +// NetworkFilter specifies a query to select an OpenStack network. At least one property must be set. +// +kubebuilder:validation:MinProperties:=1 +type NetworkFilter struct { + // Name is the name of the network to filter by. + // + // +optional + Name string `json:"name,omitempty"` + + // Description is the description of the network to filter by. + // + // +optional + Description string `json:"description,omitempty"` + + // ProjectID is the project ID of the network to filter by. + // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// NeutronTag represents a tag on a Neutron resource. +// It may not be empty and may not contain commas. +// +kubebuilder:validation:Pattern:="^[^,]+$" +// +kubebuilder:validation:MinLength:=1 +type NeutronTag string + +type FilterByNeutronTags struct { + // Tags is a list of tags to filter by. If specified, the resource must + // have all of the tags specified to be included in the result. 
+ // + // +listType=set + // +optional + Tags []NeutronTag `json:"tags,omitempty"` + + // TagsAny is a list of tags to filter by. If specified, the resource + // must have at least one of the tags specified to be included in the + // result. + // + // +listType=set + // +optional + TagsAny []NeutronTag `json:"tagsAny,omitempty"` + + // NotTags is a list of tags to filter by. If specified, resources which + // contain all of the given tags will be excluded from the result. + // + // +listType=set + // +optional + NotTags []NeutronTag `json:"notTags,omitempty"` + + // NotTagsAny is a list of tags to filter by. If specified, resources + // which contain any of the given tags will be excluded from the result. + // + // +listType=set + // +optional + NotTagsAny []NeutronTag `json:"notTagsAny,omitempty"` +} + +// SubnetParam specifies an OpenStack subnet to use. It may be specified by either ID or filter, but not both. +// +kubebuilder:validation:MaxProperties:=1 +// +kubebuilder:validation:MinProperties:=1 +type SubnetParam struct { + // ID is the uuid of the subnet. It will not be validated. + // + // +kubebuilder:validation:Format:=uuid + // +optional + ID *string `json:"id,omitempty"` + + // Filter specifies a filter to select the subnet. It must match exactly one subnet. + // + // +optional + Filter *SubnetFilter `json:"filter,omitempty"` +} + +// SubnetFilter specifies a filter to select a subnet. At least one parameter must be specified. +// +kubebuilder:validation:MinProperties:=1 +type SubnetFilter struct { + // Name is the name of the subnet to filter by. + // + // +optional + Name string `json:"name,omitempty"` + // Description is the description of the subnet to filter by. + // + // +optional + Description string `json:"description,omitempty"` + + // ProjectID is the project ID of the subnet to filter by. + // + // +optional + ProjectID string `json:"projectID,omitempty"` + + // IPVersion is the IP version of the subnet to filter by. + // + // +optional + IPVersion int `json:"ipVersion,omitempty"` + + // GatewayIP is the gateway IP of the subnet to filter by. + // + // +optional + GatewayIP string `json:"gatewayIP,omitempty"` + + // CIDR is the CIDR of the subnet to filter by. + // + // +optional + CIDR string `json:"cidr,omitempty"` + + // IPv6AddressMode is the IPv6 address mode of the subnet to filter by. + // + // +optional + IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` + + // IPv6RAMode is the IPv6 RA mode of the subnet to filter by. + // + // +optional + IPv6RAMode string `json:"ipv6RAMode,omitempty"` + + // FilterByNeutronTags specifies tags to filter by. + // + // +optional + FilterByNeutronTags `json:",inline"` +} + +// PortSpec specifies the options for creating a port. +type PortSpec struct { + // Network is a query for an openstack network that the port will be created or discovered on. + // This will fail if the query returns more than one network. + // + // +optional + Network *NetworkParam `json:"network,omitempty"` + + // Description is a human-readable description for the port. + // + // +optional + Description string `json:"description,omitempty"` + + // AllowedAddressPairs is a list of address pairs which Neutron will + // allow the port to send traffic from in addition to the port's + // addresses. If not specified, the MAC Address will be the MAC Address + // of the port. Depending on the configuration of Neutron, it may be + // supported to specify a CIDR instead of a specific IP address. 
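
A minimal sketch, under the same assumption about the hyperv1 import alias, of selecting an existing network and subnet by Neutron tag filter rather than by UUID; the secret name, network name, tag and CIDR are placeholders:

    package main

    import (
    	"fmt"

    	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    )

    func main() {
    	// ID and Filter are mutually exclusive on the *Param types (MinProperties=1,
    	// MaxProperties=1), so only Filter is set here.
    	spec := hyperv1.OpenStackPlatformSpec{
    		IdentityRef: hyperv1.OpenStackIdentityReference{
    			Name:      "openstack-credentials", // placeholder secret holding clouds.yaml
    			CloudName: "openstack",
    		},
    		Network: &hyperv1.NetworkParam{
    			Filter: &hyperv1.NetworkFilter{
    				Name: "hcp-network",
    				FilterByNeutronTags: hyperv1.FilterByNeutronTags{
    					Tags: []hyperv1.NeutronTag{"hypershift"},
    				},
    			},
    		},
    		Subnets: []hyperv1.SubnetParam{{
    			Filter: &hyperv1.SubnetFilter{IPVersion: 4, CIDR: "10.0.0.0/24"},
    		}},
    	}
    	fmt.Println(spec.Network.Filter.Name)
    }
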
+ // + // +optional + AllowedAddressPairs []AddressPair `json:"allowedAddressPairs,omitempty"` + + // VNICType specifies the type of vNIC which this port should be + // attached to. This is used to determine which mechanism driver(s) to + // be used to bind the port. The valid values are normal, macvtap, + // direct, baremetal, direct-physical, virtio-forwarder, smart-nic and + // remote-managed, although these values will not be validated in this + // API to ensure compatibility with future neutron changes or custom + // implementations. What type of vNIC is actually available depends on + // deployments. If not specified, the Neutron default value is used. + // + // +optional + VNICType string `json:"vnicType,omitempty"` + + // PortSecurityPolicy specifies whether or not to enable port security on the port. + // Allowed values are "Enabled", "Disabled" and omitted. + // When not set, it takes the value of the corresponding field at the network level. + // + // +kubebuilder:validation:Enum:=Enabled;Disabled;"" + // +optional + PortSecurityPolicy PortSecurityPolicy `json:"portSecurityPolicy,omitempty"` +} + +type AddressPair struct { + // IPAddress is the IP address of the allowed address pair. Depending on + // the configuration of Neutron, it may be supported to specify a CIDR + // instead of a specific IP address. + // + // +kubebuilder:validation:Required + // +required + IPAddress string `json:"ipAddress"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go new file mode 100644 index 000000000..24fa77149 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/powervs.go @@ -0,0 +1,295 @@ +package v1beta1 + +import ( + "fmt" + + "github.com/openshift/hypershift/api/ibmcapi" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PowerVSNodePoolProcType defines processor type to be used for PowerVSNodePoolPlatform +type PowerVSNodePoolProcType string + +func (p *PowerVSNodePoolProcType) String() string { + return string(*p) +} + +func (p *PowerVSNodePoolProcType) Set(s string) error { + switch s { + case string(PowerVSNodePoolSharedProcType), string(PowerVSNodePoolCappedProcType), string(PowerVSNodePoolDedicatedProcType): + *p = PowerVSNodePoolProcType(s) + return nil + default: + return fmt.Errorf("unknown processor type used %s", s) + } +} + +func (p *PowerVSNodePoolProcType) Type() string { + return "PowerVSNodePoolProcType" +} + +const ( + // PowerVSNodePoolDedicatedProcType defines dedicated processor type + PowerVSNodePoolDedicatedProcType = PowerVSNodePoolProcType("dedicated") + + // PowerVSNodePoolSharedProcType defines shared processor type + PowerVSNodePoolSharedProcType = PowerVSNodePoolProcType("shared") + + // PowerVSNodePoolCappedProcType defines capped processor type + PowerVSNodePoolCappedProcType = PowerVSNodePoolProcType("capped") +) + +func (p *PowerVSNodePoolProcType) CastToCAPIPowerVSProcessorType() ibmcapi.PowerVSProcessorType { + switch *p { + case PowerVSNodePoolDedicatedProcType: + return ibmcapi.PowerVSProcessorTypeDedicated + case PowerVSNodePoolCappedProcType: + return ibmcapi.PowerVSProcessorTypeCapped + default: + return ibmcapi.PowerVSProcessorTypeShared + } +} + +// PowerVSNodePoolStorageType defines storage type to be used for PowerVSNodePoolPlatform +type PowerVSNodePoolStorageType string + +// PowerVSNodePoolImageDeletePolicy defines image delete policy to be used for PowerVSNodePoolPlatform 
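
A minimal sketch of the NodePool-side OpenStack platform settings defined earlier in this file, including one additional port; the hyperv1 alias is assumed, and the flavor, zone, network name and IP address are placeholders:

    package main

    import (
    	"fmt"

    	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    )

    func main() {
    	// One extra port on a named provider network, with port security disabled
    	// and an allowed address pair for traffic sent from a non-port address.
    	np := hyperv1.OpenStackNodePoolPlatform{
    		Flavor:           "m1.xlarge",
    		AvailabilityZone: "az0",
    		AdditionalPorts: []hyperv1.PortSpec{{
    			Network: &hyperv1.NetworkParam{
    				Filter: &hyperv1.NetworkFilter{Name: "provider-net"},
    			},
    			VNICType:            "direct",
    			PortSecurityPolicy:  hyperv1.PortSecurityDisabled,
    			AllowedAddressPairs: []hyperv1.AddressPair{{IPAddress: "192.0.2.10"}},
    		}},
    	}
    	fmt.Println(np.Flavor, len(np.AdditionalPorts))
    }
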
+type PowerVSNodePoolImageDeletePolicy string + +// PowerVSNodePoolPlatform specifies the configuration of a NodePool when operating +// on IBMCloud PowerVS platform. +type PowerVSNodePoolPlatform struct { + // SystemType is the System type used to host the instance. + // systemType determines the number of cores and memory that is available. + // Few of the supported SystemTypes are s922,e880,e980. + // e880 systemType available only in Dallas Datacenters. + // e980 systemType available in Datacenters except Dallas and Washington. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default. The current default is s922 which is generally available. + // + // +optional + // +kubebuilder:default=s922 + SystemType string `json:"systemType,omitempty"` + + // ProcessorType is the VM instance processor type. + // It must be set to one of the following values: Dedicated, Capped or Shared. + // + // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. + // Shared: Shared among other clients. + // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. + // + // if the processorType is selected as Dedicated, then Processors value cannot be fractional. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default. The current default is shared. + // + // +kubebuilder:default=shared + // +kubebuilder:validation:Enum=dedicated;shared;capped + // +optional + ProcessorType PowerVSNodePoolProcType `json:"processorType,omitempty"` + + // Processors is the number of virtual processors in a virtual machine. + // when the processorType is selected as Dedicated the processors value cannot be fractional. + // maximum value for the Processors depends on the selected SystemType. + // when SystemType is set to e880 or e980 maximum Processors value is 143. + // when SystemType is set to s922 maximum Processors value is 15. + // minimum value for Processors depends on the selected ProcessorType. + // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. + // when ProcessorType is set as Dedicated, The minimum processors is 1. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default. The default is set based on the selected ProcessorType. + // when ProcessorType selected as Dedicated, the default is set to 1. + // when ProcessorType selected as Shared or Capped, the default is set to 0.5. + // + // +optional + // +kubebuilder:default="0.5" + Processors intstr.IntOrString `json:"processors,omitempty"` + + // MemoryGiB is the size of a virtual machine's memory, in GiB. + // maximum value for the MemoryGiB depends on the selected SystemType. + // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. + // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. + // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. + // The minimum memory is 32 GiB. + // + // When omitted, this means the user has no opinion and the platform is left to choose a reasonable + // default. The current default is 32. + // + // +optional + // +kubebuilder:default=32 + MemoryGiB int32 `json:"memoryGiB,omitempty"` + + // Image used for deploying the nodes. 
If unspecified, the default + // is chosen based on the NodePool release payload image. + // + // +optional + Image *PowerVSResourceReference `json:"image,omitempty"` + + // StorageType for the image and nodes, this will be ignored if Image is specified. + // The storage tiers in PowerVS are based on I/O operations per second (IOPS). + // It means that the performance of your storage volumes is limited to the maximum number of IOPS based on volume size and storage tier. + // Although, the exact numbers might change over time, the Tier 3 storage is currently set to 3 IOPS/GB, and the Tier 1 storage is currently set to 10 IOPS/GB. + // + // The default is tier1 + // + // +kubebuilder:default=tier1 + // +kubebuilder:validation:Enum=tier1;tier3 + // +optional + StorageType PowerVSNodePoolStorageType `json:"storageType,omitempty"` + + // ImageDeletePolicy is policy for the image deletion. + // + // delete: delete the image from the infrastructure. + // retain: delete the image from the openshift but retain in the infrastructure. + // + // The default is delete + // + // +kubebuilder:default=delete + // +kubebuilder:validation:Enum=delete;retain + // +optional + ImageDeletePolicy PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"` +} + +// PowerVSPlatformSpec defines IBMCloud PowerVS specific settings for components +type PowerVSPlatformSpec struct { + // AccountID is the IBMCloud account id. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + AccountID string `json:"accountID"` + + // CISInstanceCRN is the IBMCloud CIS Service Instance's Cloud Resource Name + // This field is immutable. Once set, It can't be changed. + // + // +kubebuilder:validation:Pattern=`^crn:` + // +immutable + CISInstanceCRN string `json:"cisInstanceCRN"` + + // ResourceGroup is the IBMCloud Resource Group in which the cluster resides. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + ResourceGroup string `json:"resourceGroup"` + + // Region is the IBMCloud region in which the cluster resides. This configures the + // OCP control plane cloud integrations, and is used by NodePool to resolve + // the correct boot image for a given release. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + Region string `json:"region"` + + // Zone is the availability zone where control plane cloud resources are + // created. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + Zone string `json:"zone"` + + // Subnet is the subnet to use for control plane cloud resources. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + Subnet *PowerVSResourceReference `json:"subnet"` + + // ServiceInstance is the reference to the Power VS service on which the server instance(VM) will be created. + // Power VS service is a container for all Power VS instances at a specific geographic region. + // serviceInstance can be created via IBM Cloud catalog or CLI. + // ServiceInstanceID is the unique identifier that can be obtained from IBM Cloud UI or IBM Cloud cli. + // + // More detail about Power VS service instance. + // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server + // + // This field is immutable. Once set, It can't be changed. + // + // +immutable + ServiceInstanceID string `json:"serviceInstanceID"` + + // VPC specifies IBM Cloud PowerVS Load Balancing configuration for the control + // plane. + // This field is immutable. 
Once set, It can't be changed. + // + // +immutable + VPC *PowerVSVPC `json:"vpc"` + + // KubeCloudControllerCreds is a reference to a secret containing cloud + // credentials with permissions matching the cloud controller policy. + // This field is immutable. Once set, It can't be changed. + // + // TODO(dan): document the "cloud controller policy" + // + // +immutable + KubeCloudControllerCreds corev1.LocalObjectReference `json:"kubeCloudControllerCreds"` + + // NodePoolManagementCreds is a reference to a secret containing cloud + // credentials with permissions matching the node pool management policy. + // This field is immutable. Once set, It can't be changed. + // + // TODO(dan): document the "node pool management policy" + // + // +immutable + NodePoolManagementCreds corev1.LocalObjectReference `json:"nodePoolManagementCreds"` + + // IngressOperatorCloudCreds is a reference to a secret containing ibm cloud + // credentials for ingress operator to get authenticated with ibm cloud. + // + // +immutable + IngressOperatorCloudCreds corev1.LocalObjectReference `json:"ingressOperatorCloudCreds"` + + // StorageOperatorCloudCreds is a reference to a secret containing ibm cloud + // credentials for storage operator to get authenticated with ibm cloud. + // + // +immutable + StorageOperatorCloudCreds corev1.LocalObjectReference `json:"storageOperatorCloudCreds"` + + // ImageRegistryOperatorCloudCreds is a reference to a secret containing ibm cloud + // credentials for image registry operator to get authenticated with ibm cloud. + // + // +immutable + ImageRegistryOperatorCloudCreds corev1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds"` +} + +// PowerVSVPC specifies IBM Cloud PowerVS LoadBalancer configuration for the control +// plane. +type PowerVSVPC struct { + // Name for VPC to used for all the service load balancer. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + Name string `json:"name"` + + // Region is the IBMCloud region in which VPC gets created, this VPC used for all the ingress traffic + // into the OCP cluster. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + Region string `json:"region"` + + // Zone is the availability zone where load balancer cloud resources are + // created. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + // +optional + Zone string `json:"zone,omitempty"` + + // Subnet is the subnet to use for load balancer. + // This field is immutable. Once set, It can't be changed. + // + // +immutable + // +optional + Subnet string `json:"subnet,omitempty"` +} + +// PowerVSResourceReference is a reference to a specific IBMCloud PowerVS resource by ID, or Name. +// Only one of ID, or Name may be specified. Specifying more than one will result in +// a validation error. 
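
A minimal sketch of a PowerVS NodePool platform section using the defaults described above (shared, fractional processors); the hyperv1 alias is assumed and the values are placeholders:

    package main

    import (
    	"fmt"

    	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	// Shared processors may be fractional; Dedicated would require a whole number.
    	np := hyperv1.PowerVSNodePoolPlatform{
    		SystemType:    "s922",
    		ProcessorType: hyperv1.PowerVSNodePoolSharedProcType,
    		Processors:    intstr.FromString("0.5"),
    		MemoryGiB:     32,
    		StorageType:   "tier1",
    	}
    	// The helper maps the API enum onto the CAPI IBM Power VS processor type.
    	fmt.Println(np.ProcessorType.CastToCAPIPowerVSProcessorType())
    }
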
+type PowerVSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // Name of resource + // +optional + Name *string `json:"name,omitempty"` +} diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go index 83ae4c62b..f5c45b362 100644 --- a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.deepcopy.go @@ -298,6 +298,11 @@ func (in *AWSNodePoolPlatform) DeepCopyInto(out *AWSNodePoolPlatform) { *out = make([]AWSResourceTag, len(*in)) copy(*out, *in) } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(PlacementOptions) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNodePoolPlatform. @@ -334,6 +339,11 @@ func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.SharedVPC != nil { + in, out := &in.SharedVPC, &out.SharedVPC + *out = new(AWSSharedVPC) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. @@ -448,6 +458,52 @@ func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSharedVPC) DeepCopyInto(out *AWSSharedVPC) { + *out = *in + out.RolesRef = in.RolesRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSharedVPC. +func (in *AWSSharedVPC) DeepCopy() *AWSSharedVPC { + if in == nil { + return nil + } + out := new(AWSSharedVPC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSharedVPCRolesRef) DeepCopyInto(out *AWSSharedVPCRolesRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSharedVPCRolesRef. +func (in *AWSSharedVPCRolesRef) DeepCopy() *AWSSharedVPCRolesRef { + if in == nil { + return nil + } + out := new(AWSSharedVPCRolesRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressPair) DeepCopyInto(out *AddressPair) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressPair. +func (in *AddressPair) DeepCopy() *AddressPair { + if in == nil { + return nil + } + out := new(AddressPair) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AgentNodePoolPlatform) DeepCopyInto(out *AgentNodePoolPlatform) { *out = *in @@ -522,6 +578,7 @@ func (in *AzureKMSSpec) DeepCopyInto(out *AzureKMSSpec) { *out = new(AzureKMSKey) **out = **in } + out.KMS = in.KMS } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKMSSpec. @@ -534,13 +591,45 @@ func (in *AzureKMSSpec) DeepCopy() *AzureKMSSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMarketplaceImage) DeepCopyInto(out *AzureMarketplaceImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMarketplaceImage. +func (in *AzureMarketplaceImage) DeepCopy() *AzureMarketplaceImage { + if in == nil { + return nil + } + out := new(AzureMarketplaceImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureNodePoolOSDisk) DeepCopyInto(out *AzureNodePoolOSDisk) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNodePoolOSDisk. +func (in *AzureNodePoolOSDisk) DeepCopy() *AzureNodePoolOSDisk { + if in == nil { + return nil + } + out := new(AzureNodePoolOSDisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureNodePoolPlatform) DeepCopyInto(out *AzureNodePoolPlatform) { *out = *in + in.Image.DeepCopyInto(&out.Image) + out.OSDisk = in.OSDisk if in.Diagnostics != nil { in, out := &in.Diagnostics, &out.Diagnostics *out = new(Diagnostics) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -558,6 +647,7 @@ func (in *AzureNodePoolPlatform) DeepCopy() *AzureNodePoolPlatform { func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { *out = *in out.Credentials = in.Credentials + out.ManagedIdentities = in.ManagedIdentities } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. @@ -570,6 +660,47 @@ func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureResourceManagedIdentities) DeepCopyInto(out *AzureResourceManagedIdentities) { + *out = *in + out.ControlPlane = in.ControlPlane +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceManagedIdentities. +func (in *AzureResourceManagedIdentities) DeepCopy() *AzureResourceManagedIdentities { + if in == nil { + return nil + } + out := new(AzureResourceManagedIdentities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureVMImage) DeepCopyInto(out *AzureVMImage) { + *out = *in + if in.ImageID != nil { + in, out := &in.ImageID, &out.ImageID + *out = new(string) + **out = **in + } + if in.AzureMarketplace != nil { + in, out := &in.AzureMarketplace, &out.AzureMarketplace + *out = new(AzureMarketplaceImage) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureVMImage. +func (in *AzureVMImage) DeepCopy() *AzureVMImage { + if in == nil { + return nil + } + out := new(AzureVMImage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateSigningRequestApproval) DeepCopyInto(out *CertificateSigningRequestApproval) { *out = *in @@ -848,6 +979,146 @@ func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentResource) DeepCopyInto(out *ComponentResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResource. +func (in *ComponentResource) DeepCopy() *ComponentResource { + if in == nil { + return nil + } + out := new(ComponentResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponent. +func (in *ControlPlaneComponent) DeepCopy() *ControlPlaneComponent { + if in == nil { + return nil + } + out := new(ControlPlaneComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneComponent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponentList) DeepCopyInto(out *ControlPlaneComponentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControlPlaneComponent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentList. +func (in *ControlPlaneComponentList) DeepCopy() *ControlPlaneComponentList { + if in == nil { + return nil + } + out := new(ControlPlaneComponentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneComponentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponentSpec) DeepCopyInto(out *ControlPlaneComponentSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentSpec. +func (in *ControlPlaneComponentSpec) DeepCopy() *ControlPlaneComponentSpec { + if in == nil { + return nil + } + out := new(ControlPlaneComponentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponentStatus) DeepCopyInto(out *ControlPlaneComponentStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ComponentResource, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponentStatus. 
+func (in *ControlPlaneComponentStatus) DeepCopy() *ControlPlaneComponentStatus { + if in == nil { + return nil + } + out := new(ControlPlaneComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneManagedIdentities) DeepCopyInto(out *ControlPlaneManagedIdentities) { + *out = *in + out.ManagedIdentitiesKeyVault = in.ManagedIdentitiesKeyVault + out.CloudProvider = in.CloudProvider + out.NodePoolManagement = in.NodePoolManagement + out.ControlPlaneOperator = in.ControlPlaneOperator + out.ImageRegistry = in.ImageRegistry + out.Ingress = in.Ingress + out.Network = in.Network + out.Disk = in.Disk + out.File = in.File +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneManagedIdentities. +func (in *ControlPlaneManagedIdentities) DeepCopy() *ControlPlaneManagedIdentities { + if in == nil { + return nil + } + out := new(ControlPlaneManagedIdentities) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { *out = *in @@ -871,6 +1142,11 @@ func (in *DNSSpec) DeepCopy() *DNSSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Diagnostics) DeepCopyInto(out *Diagnostics) { *out = *in + if in.UserManaged != nil { + in, out := &in.UserManaged, &out.UserManaged + *out = new(UserManagedDiagnostics) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Diagnostics. @@ -1112,6 +1388,13 @@ func (in *HostedClusterSpec) DeepCopyInto(out *HostedClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedClusterSpec. @@ -1304,6 +1587,13 @@ func (in *HostedControlPlaneSpec) DeepCopyInto(out *HostedControlPlaneSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneSpec. @@ -1969,6 +2259,21 @@ func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedAzureKeyVault) DeepCopyInto(out *ManagedAzureKeyVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedAzureKeyVault. +func (in *ManagedAzureKeyVault) DeepCopy() *ManagedAzureKeyVault { + if in == nil { + return nil + } + out := new(ManagedAzureKeyVault) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedEtcdSpec) DeepCopyInto(out *ManagedEtcdSpec) { *out = *in @@ -2010,6 +2315,21 @@ func (in *ManagedEtcdStorageSpec) DeepCopy() *ManagedEtcdStorageSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ManagedIdentity) DeepCopyInto(out *ManagedIdentity) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedIdentity. +func (in *ManagedIdentity) DeepCopy() *ManagedIdentity { + if in == nil { + return nil + } + out := new(ManagedIdentity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkFilter) DeepCopyInto(out *NetworkFilter) { *out = *in @@ -2202,7 +2522,7 @@ func (in *NodePoolPlatform) DeepCopyInto(out *NodePoolPlatform) { if in.OpenStack != nil { in, out := &in.OpenStack, &out.OpenStack *out = new(OpenStackNodePoolPlatform) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -2361,6 +2681,13 @@ func (in *OpenStackIdentityReference) DeepCopy() *OpenStackIdentityReference { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenStackNodePoolPlatform) DeepCopyInto(out *OpenStackNodePoolPlatform) { *out = *in + if in.AdditionalPorts != nil { + in, out := &in.AdditionalPorts, &out.AdditionalPorts + *out = make([]PortSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackNodePoolPlatform. @@ -2458,6 +2785,21 @@ func (in *PersistentVolumeEtcdStorageSpec) DeepCopy() *PersistentVolumeEtcdStora return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementOptions) DeepCopyInto(out *PlacementOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementOptions. +func (in *PlacementOptions) DeepCopy() *PlacementOptions { + if in == nil { + return nil + } + out := new(PlacementOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -2528,6 +2870,31 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSpec) DeepCopyInto(out *PortSpec) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkParam) + (*in).DeepCopyInto(*out) + } + if in.AllowedAddressPairs != nil { + in, out := &in.AllowedAddressPairs, &out.AllowedAddressPairs + *out = make([]AddressPair, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSpec. +func (in *PortSpec) DeepCopy() *PortSpec { + if in == nil { + return nil + } + out := new(PortSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PowerVSNodePoolPlatform) DeepCopyInto(out *PowerVSNodePoolPlatform) { *out = *in @@ -2919,6 +3286,21 @@ func (in *UnmanagedEtcdSpec) DeepCopy() *UnmanagedEtcdSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserManagedDiagnostics) DeepCopyInto(out *UserManagedDiagnostics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserManagedDiagnostics. +func (in *UserManagedDiagnostics) DeepCopy() *UserManagedDiagnostics { + if in == nil { + return nil + } + out := new(UserManagedDiagnostics) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in diff --git a/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..608af7ce1 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/api/hypershift/v1beta1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,231 @@ +awsendpointservices.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: awsendpointservices.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: AWSEndpointService + Labels: {} + PluralName: awsendpointservices + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: [] + Version: v1beta1 + +certificatesigningrequestapprovals.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: certificatesigningrequestapprovals.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: false + KindName: CertificateSigningRequestApproval + Labels: {} + PluralName: certificatesigningrequestapprovals + PrinterColumns: [] + Scope: Namespaced + ShortNames: + - csra + - csras + TopLevelFeatureGates: [] + Version: v1beta1 + +controlplanecomponents.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: controlplanecomponents.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ControlPlaneV2 + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: ControlPlaneComponent + Labels: {} + PluralName: controlplanecomponents + PrinterColumns: + - description: Version + jsonPath: .status.version + name: Version + type: string + - description: Available + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Progressing + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Available")].message + name: Message + type: string + - description: ProgressingMessage + jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: ProgressingMessage + priority: 1 + type: string + Scope: Namespaced + ShortNames: + - cpc + - cpcs + TopLevelFeatureGates: + - ControlPlaneV2 + Version: v1beta1 + +hostedclusters.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: hostedclusters.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AROHCPManagedIdentities + - DynamicResourceAllocation + - ExternalOIDC + - HCPPodsLabels + - 
NetworkDiagnosticsConfig + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: HostedCluster + Labels: {} + PluralName: hostedclusters + PrinterColumns: + - description: Version + jsonPath: .status.version.history[?(@.state=="Completed")].version + name: Version + type: string + - description: KubeConfig Secret + jsonPath: .status.kubeconfig.name + name: KubeConfig + type: string + - description: Progress + jsonPath: .status.version.history[?(@.state!="")].state + name: Progress + type: string + - description: Available + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Progressing + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Available")].message + name: Message + type: string + Scope: Namespaced + ShortNames: + - hc + - hcs + TopLevelFeatureGates: [] + Version: v1beta1 + +hostedcontrolplanes.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: hostedcontrolplanes.hypershift.openshift.io + Capability: "" + Category: cluster-api + FeatureGates: + - AROHCPManagedIdentities + - DynamicResourceAllocation + - ExternalOIDC + - NetworkDiagnosticsConfig + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: HostedControlPlane + Labels: {} + PluralName: hostedcontrolplanes + PrinterColumns: [] + Scope: Namespaced + ShortNames: + - hcp + - hcps + TopLevelFeatureGates: [] + Version: v1beta1 + +nodepools.hypershift.openshift.io: + Annotations: {} + ApprovedPRNumber: "" + CRDName: nodepools.hypershift.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OpenStack + FilenameOperatorName: "" + FilenameOperatorOrdering: "" + FilenameRunLevel: "" + GroupName: hypershift.openshift.io + HasStatus: true + KindName: NodePool + Labels: {} + PluralName: nodepools + PrinterColumns: + - description: Cluster + jsonPath: .spec.clusterName + name: Cluster + type: string + - description: Desired Nodes + jsonPath: .spec.replicas + name: Desired Nodes + type: integer + - description: Available Nodes + jsonPath: .status.replicas + name: Current Nodes + type: integer + - description: Autoscaling Enabled + jsonPath: .status.conditions[?(@.type=="AutoscalingEnabled")].status + name: Autoscaling + type: string + - description: Node Autorepair Enabled + jsonPath: .status.conditions[?(@.type=="AutorepairEnabled")].status + name: Autorepair + type: string + - description: Current version + jsonPath: .status.version + name: Version + type: string + - description: UpdatingVersion in progress + jsonPath: .status.conditions[?(@.type=="UpdatingVersion")].status + name: UpdatingVersion + type: string + - description: UpdatingConfig in progress + jsonPath: .status.conditions[?(@.type=="UpdatingConfig")].status + name: UpdatingConfig + type: string + - description: Message + jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Message + type: string + Scope: Namespaced + ShortNames: + - np + - nps + TopLevelFeatureGates: [] + Version: v1beta1 + diff --git a/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/clustersizingconfiguration_types.go b/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/clustersizingconfiguration_types.go index b62d53998..a41efcd22 100644 --- 
a/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/clustersizingconfiguration_types.go +++ b/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/clustersizingconfiguration_types.go @@ -137,6 +137,14 @@ type Effects struct { // MachineHealthCheckTimeout specifies an optional timeout for machinehealthchecks created // for HostedClusters with this specific size. MachineHealthCheckTimeout *metav1.Duration `json:"machineHealthCheckTimeout,omitempty"` + + // +kubebuilder:validation:Optional + // MaximumRequestsInFlight specifies the maximum requests in flight for Kube APIServer + MaximumRequestsInflight *int `json:"maximumRequestsInflight,omitempty"` + + // +kubebuilder:validation:Optional + // MaximumMutatingRequestsInflight specifies the maximum mutating requests in flight for Kube APIServer + MaximumMutatingRequestsInflight *int `json:"maximumMutatingRequestsInflight,omitempty"` } // Management configures behaviors of the management plane for a size class. diff --git a/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/zz_generated.deepcopy.go index cff515f39..117d2e27b 100644 --- a/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hypershift/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -186,6 +186,16 @@ func (in *Effects) DeepCopyInto(out *Effects) { *out = new(v1.Duration) **out = **in } + if in.MaximumRequestsInflight != nil { + in, out := &in.MaximumRequestsInflight, &out.MaximumRequestsInflight + *out = new(int) + **out = **in + } + if in.MaximumMutatingRequestsInflight != nil { + in, out := &in.MaximumMutatingRequestsInflight, &out.MaximumMutatingRequestsInflight + *out = new(int) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Effects. diff --git a/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go b/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go index 4a305a153..63fc6b7f4 100644 --- a/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go +++ b/vendor/github.com/openshift/hypershift/api/util/ipnet/ipnet.go @@ -16,6 +16,8 @@ var nullBytes = []byte(nullString) // IPNet wraps net.IPNet to get CIDR serialization. // // +kubebuilder:validation:Type=string +// +kubebuilder:validation:MaxLength=43 +// +kubebuilder:validation:XValidation:rule=`self.matches('^((\\d{1,3}\\.){3}\\d{1,3}/\\d{1,2})$') || self.matches('^([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?/[0-9]{1,3}$')`,message="cidr must be a valid IPv4 or IPv6 CIDR notation (e.g., 192.168.1.0/24 or 2001:db8::/64)" type IPNet net.IPNet type IPNets []IPNet diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/aescbcspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/aescbcspec.go deleted file mode 100644 index 5264469a7..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/aescbcspec.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
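
A minimal sketch of the two new optional Effects fields added above for a cluster size class; the schedulingv1alpha1 alias and the request limits are placeholders, and leaving the pointers nil keeps whatever the kube-apiserver would otherwise use:

    package main

    import (
    	"fmt"

    	schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
    )

    func main() {
    	// Both knobs are plain *int pointers marked optional in the CRD.
    	maxInflight := 3000
    	maxMutating := 1000
    	effects := schedulingv1alpha1.Effects{
    		MaximumRequestsInflight:         &maxInflight,
    		MaximumMutatingRequestsInflight: &maxMutating,
    	}
    	fmt.Println(*effects.MaximumRequestsInflight, *effects.MaximumMutatingRequestsInflight)
    }
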
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// AESCBCSpecApplyConfiguration represents an declarative configuration of the AESCBCSpec type for use -// with apply. -type AESCBCSpecApplyConfiguration struct { - ActiveKey *v1.LocalObjectReference `json:"activeKey,omitempty"` - BackupKey *v1.LocalObjectReference `json:"backupKey,omitempty"` -} - -// AESCBCSpecApplyConfiguration constructs an declarative configuration of the AESCBCSpec type for use with -// apply. -func AESCBCSpec() *AESCBCSpecApplyConfiguration { - return &AESCBCSpecApplyConfiguration{} -} - -// WithActiveKey sets the ActiveKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ActiveKey field is set to the value of the last call. -func (b *AESCBCSpecApplyConfiguration) WithActiveKey(value v1.LocalObjectReference) *AESCBCSpecApplyConfiguration { - b.ActiveKey = &value - return b -} - -// WithBackupKey sets the BackupKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BackupKey field is set to the value of the last call. -func (b *AESCBCSpecApplyConfiguration) WithBackupKey(value v1.LocalObjectReference) *AESCBCSpecApplyConfiguration { - b.BackupKey = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentnodepoolplatform.go deleted file mode 100644 index b10691e86..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentnodepoolplatform.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// AgentNodePoolPlatformApplyConfiguration represents an declarative configuration of the AgentNodePoolPlatform type for use -// with apply. -type AgentNodePoolPlatformApplyConfiguration struct { - AgentLabelSelector *v1.LabelSelectorApplyConfiguration `json:"agentLabelSelector,omitempty"` -} - -// AgentNodePoolPlatformApplyConfiguration constructs an declarative configuration of the AgentNodePoolPlatform type for use with -// apply. 
-func AgentNodePoolPlatform() *AgentNodePoolPlatformApplyConfiguration { - return &AgentNodePoolPlatformApplyConfiguration{} -} - -// WithAgentLabelSelector sets the AgentLabelSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AgentLabelSelector field is set to the value of the last call. -func (b *AgentNodePoolPlatformApplyConfiguration) WithAgentLabelSelector(value *v1.LabelSelectorApplyConfiguration) *AgentNodePoolPlatformApplyConfiguration { - b.AgentLabelSelector = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentplatformspec.go deleted file mode 100644 index c59378c11..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/agentplatformspec.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AgentPlatformSpecApplyConfiguration represents an declarative configuration of the AgentPlatformSpec type for use -// with apply. -type AgentPlatformSpecApplyConfiguration struct { - AgentNamespace *string `json:"agentNamespace,omitempty"` -} - -// AgentPlatformSpecApplyConfiguration constructs an declarative configuration of the AgentPlatformSpec type for use with -// apply. -func AgentPlatformSpec() *AgentPlatformSpecApplyConfiguration { - return &AgentPlatformSpecApplyConfiguration{} -} - -// WithAgentNamespace sets the AgentNamespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AgentNamespace field is set to the value of the last call. -func (b *AgentPlatformSpecApplyConfiguration) WithAgentNamespace(value string) *AgentPlatformSpecApplyConfiguration { - b.AgentNamespace = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiendpoint.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiendpoint.go deleted file mode 100644 index f83a750b9..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiendpoint.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// APIEndpointApplyConfiguration represents an declarative configuration of the APIEndpoint type for use -// with apply. -type APIEndpointApplyConfiguration struct { - Host *string `json:"host,omitempty"` - Port *int32 `json:"port,omitempty"` -} - -// APIEndpointApplyConfiguration constructs an declarative configuration of the APIEndpoint type for use with -// apply. -func APIEndpoint() *APIEndpointApplyConfiguration { - return &APIEndpointApplyConfiguration{} -} - -// WithHost sets the Host field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Host field is set to the value of the last call. -func (b *APIEndpointApplyConfiguration) WithHost(value string) *APIEndpointApplyConfiguration { - b.Host = &value - return b -} - -// WithPort sets the Port field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Port field is set to the value of the last call. -func (b *APIEndpointApplyConfiguration) WithPort(value int32) *APIEndpointApplyConfiguration { - b.Port = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiservernetworking.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiservernetworking.go deleted file mode 100644 index e16742e4d..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/apiservernetworking.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// APIServerNetworkingApplyConfiguration represents an declarative configuration of the APIServerNetworking type for use -// with apply. -type APIServerNetworkingApplyConfiguration struct { - AdvertiseAddress *string `json:"advertiseAddress,omitempty"` - Port *int32 `json:"port,omitempty"` - AllowedCIDRBlocks []v1alpha1.CIDRBlock `json:"allowedCIDRBlocks,omitempty"` -} - -// APIServerNetworkingApplyConfiguration constructs an declarative configuration of the APIServerNetworking type for use with -// apply. -func APIServerNetworking() *APIServerNetworkingApplyConfiguration { - return &APIServerNetworkingApplyConfiguration{} -} - -// WithAdvertiseAddress sets the AdvertiseAddress field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AdvertiseAddress field is set to the value of the last call. 
-func (b *APIServerNetworkingApplyConfiguration) WithAdvertiseAddress(value string) *APIServerNetworkingApplyConfiguration { - b.AdvertiseAddress = &value - return b -} - -// WithPort sets the Port field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Port field is set to the value of the last call. -func (b *APIServerNetworkingApplyConfiguration) WithPort(value int32) *APIServerNetworkingApplyConfiguration { - b.Port = &value - return b -} - -// WithAllowedCIDRBlocks adds the given value to the AllowedCIDRBlocks field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCIDRBlocks field. -func (b *APIServerNetworkingApplyConfiguration) WithAllowedCIDRBlocks(values ...v1alpha1.CIDRBlock) *APIServerNetworkingApplyConfiguration { - for i := range values { - b.AllowedCIDRBlocks = append(b.AllowedCIDRBlocks, values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awscloudproviderconfig.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awscloudproviderconfig.go deleted file mode 100644 index bcc846e0f..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awscloudproviderconfig.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSCloudProviderConfigApplyConfiguration represents an declarative configuration of the AWSCloudProviderConfig type for use -// with apply. -type AWSCloudProviderConfigApplyConfiguration struct { - Subnet *AWSResourceReferenceApplyConfiguration `json:"subnet,omitempty"` - Zone *string `json:"zone,omitempty"` - VPC *string `json:"vpc,omitempty"` -} - -// AWSCloudProviderConfigApplyConfiguration constructs an declarative configuration of the AWSCloudProviderConfig type for use with -// apply. -func AWSCloudProviderConfig() *AWSCloudProviderConfigApplyConfiguration { - return &AWSCloudProviderConfigApplyConfiguration{} -} - -// WithSubnet sets the Subnet field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Subnet field is set to the value of the last call. -func (b *AWSCloudProviderConfigApplyConfiguration) WithSubnet(value *AWSResourceReferenceApplyConfiguration) *AWSCloudProviderConfigApplyConfiguration { - b.Subnet = value - return b -} - -// WithZone sets the Zone field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Zone field is set to the value of the last call. -func (b *AWSCloudProviderConfigApplyConfiguration) WithZone(value string) *AWSCloudProviderConfigApplyConfiguration { - b.Zone = &value - return b -} - -// WithVPC sets the VPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VPC field is set to the value of the last call. -func (b *AWSCloudProviderConfigApplyConfiguration) WithVPC(value string) *AWSCloudProviderConfigApplyConfiguration { - b.VPC = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsauthspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsauthspec.go deleted file mode 100644 index a53ad9893..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsauthspec.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// AWSKMSAuthSpecApplyConfiguration represents an declarative configuration of the AWSKMSAuthSpec type for use -// with apply. -type AWSKMSAuthSpecApplyConfiguration struct { - Credentials *v1.LocalObjectReference `json:"credentials,omitempty"` - AWSKMSRoleARN *string `json:"awsKms,omitempty"` -} - -// AWSKMSAuthSpecApplyConfiguration constructs an declarative configuration of the AWSKMSAuthSpec type for use with -// apply. -func AWSKMSAuthSpec() *AWSKMSAuthSpecApplyConfiguration { - return &AWSKMSAuthSpecApplyConfiguration{} -} - -// WithCredentials sets the Credentials field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Credentials field is set to the value of the last call. -func (b *AWSKMSAuthSpecApplyConfiguration) WithCredentials(value v1.LocalObjectReference) *AWSKMSAuthSpecApplyConfiguration { - b.Credentials = &value - return b -} - -// WithAWSKMSRoleARN sets the AWSKMSRoleARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWSKMSRoleARN field is set to the value of the last call. 
-func (b *AWSKMSAuthSpecApplyConfiguration) WithAWSKMSRoleARN(value string) *AWSKMSAuthSpecApplyConfiguration { - b.AWSKMSRoleARN = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmskeyentry.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmskeyentry.go deleted file mode 100644 index 35ed1fa03..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmskeyentry.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSKMSKeyEntryApplyConfiguration represents an declarative configuration of the AWSKMSKeyEntry type for use -// with apply. -type AWSKMSKeyEntryApplyConfiguration struct { - ARN *string `json:"arn,omitempty"` -} - -// AWSKMSKeyEntryApplyConfiguration constructs an declarative configuration of the AWSKMSKeyEntry type for use with -// apply. -func AWSKMSKeyEntry() *AWSKMSKeyEntryApplyConfiguration { - return &AWSKMSKeyEntryApplyConfiguration{} -} - -// WithARN sets the ARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ARN field is set to the value of the last call. -func (b *AWSKMSKeyEntryApplyConfiguration) WithARN(value string) *AWSKMSKeyEntryApplyConfiguration { - b.ARN = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsspec.go deleted file mode 100644 index b487f058e..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awskmsspec.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSKMSSpecApplyConfiguration represents an declarative configuration of the AWSKMSSpec type for use -// with apply. 
-type AWSKMSSpecApplyConfiguration struct { - Region *string `json:"region,omitempty"` - ActiveKey *AWSKMSKeyEntryApplyConfiguration `json:"activeKey,omitempty"` - BackupKey *AWSKMSKeyEntryApplyConfiguration `json:"backupKey,omitempty"` - Auth *AWSKMSAuthSpecApplyConfiguration `json:"auth,omitempty"` -} - -// AWSKMSSpecApplyConfiguration constructs an declarative configuration of the AWSKMSSpec type for use with -// apply. -func AWSKMSSpec() *AWSKMSSpecApplyConfiguration { - return &AWSKMSSpecApplyConfiguration{} -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. -func (b *AWSKMSSpecApplyConfiguration) WithRegion(value string) *AWSKMSSpecApplyConfiguration { - b.Region = &value - return b -} - -// WithActiveKey sets the ActiveKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ActiveKey field is set to the value of the last call. -func (b *AWSKMSSpecApplyConfiguration) WithActiveKey(value *AWSKMSKeyEntryApplyConfiguration) *AWSKMSSpecApplyConfiguration { - b.ActiveKey = value - return b -} - -// WithBackupKey sets the BackupKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BackupKey field is set to the value of the last call. -func (b *AWSKMSSpecApplyConfiguration) WithBackupKey(value *AWSKMSKeyEntryApplyConfiguration) *AWSKMSSpecApplyConfiguration { - b.BackupKey = value - return b -} - -// WithAuth sets the Auth field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Auth field is set to the value of the last call. -func (b *AWSKMSSpecApplyConfiguration) WithAuth(value *AWSKMSAuthSpecApplyConfiguration) *AWSKMSSpecApplyConfiguration { - b.Auth = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsnodepoolplatform.go deleted file mode 100644 index 94206fbcd..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsnodepoolplatform.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSNodePoolPlatformApplyConfiguration represents an declarative configuration of the AWSNodePoolPlatform type for use -// with apply. 
-type AWSNodePoolPlatformApplyConfiguration struct { - InstanceType *string `json:"instanceType,omitempty"` - InstanceProfile *string `json:"instanceProfile,omitempty"` - Subnet *AWSResourceReferenceApplyConfiguration `json:"subnet,omitempty"` - AMI *string `json:"ami,omitempty"` - SecurityGroups []AWSResourceReferenceApplyConfiguration `json:"securityGroups,omitempty"` - RootVolume *VolumeApplyConfiguration `json:"rootVolume,omitempty"` - ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` -} - -// AWSNodePoolPlatformApplyConfiguration constructs an declarative configuration of the AWSNodePoolPlatform type for use with -// apply. -func AWSNodePoolPlatform() *AWSNodePoolPlatformApplyConfiguration { - return &AWSNodePoolPlatformApplyConfiguration{} -} - -// WithInstanceType sets the InstanceType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InstanceType field is set to the value of the last call. -func (b *AWSNodePoolPlatformApplyConfiguration) WithInstanceType(value string) *AWSNodePoolPlatformApplyConfiguration { - b.InstanceType = &value - return b -} - -// WithInstanceProfile sets the InstanceProfile field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InstanceProfile field is set to the value of the last call. -func (b *AWSNodePoolPlatformApplyConfiguration) WithInstanceProfile(value string) *AWSNodePoolPlatformApplyConfiguration { - b.InstanceProfile = &value - return b -} - -// WithSubnet sets the Subnet field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Subnet field is set to the value of the last call. -func (b *AWSNodePoolPlatformApplyConfiguration) WithSubnet(value *AWSResourceReferenceApplyConfiguration) *AWSNodePoolPlatformApplyConfiguration { - b.Subnet = value - return b -} - -// WithAMI sets the AMI field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AMI field is set to the value of the last call. -func (b *AWSNodePoolPlatformApplyConfiguration) WithAMI(value string) *AWSNodePoolPlatformApplyConfiguration { - b.AMI = &value - return b -} - -// WithSecurityGroups adds the given value to the SecurityGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the SecurityGroups field. -func (b *AWSNodePoolPlatformApplyConfiguration) WithSecurityGroups(values ...*AWSResourceReferenceApplyConfiguration) *AWSNodePoolPlatformApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithSecurityGroups") - } - b.SecurityGroups = append(b.SecurityGroups, *values[i]) - } - return b -} - -// WithRootVolume sets the RootVolume field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RootVolume field is set to the value of the last call. 
-func (b *AWSNodePoolPlatformApplyConfiguration) WithRootVolume(value *VolumeApplyConfiguration) *AWSNodePoolPlatformApplyConfiguration { - b.RootVolume = value - return b -} - -// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceTags field. -func (b *AWSNodePoolPlatformApplyConfiguration) WithResourceTags(values ...*AWSResourceTagApplyConfiguration) *AWSNodePoolPlatformApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceTags") - } - b.ResourceTags = append(b.ResourceTags, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformspec.go deleted file mode 100644 index 55079038d..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformspec.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - v1 "k8s.io/api/core/v1" -) - -// AWSPlatformSpecApplyConfiguration represents an declarative configuration of the AWSPlatformSpec type for use -// with apply. -type AWSPlatformSpecApplyConfiguration struct { - Region *string `json:"region,omitempty"` - CloudProviderConfig *AWSCloudProviderConfigApplyConfiguration `json:"cloudProviderConfig,omitempty"` - ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` - RolesRef *AWSRolesRefApplyConfiguration `json:"rolesRef,omitempty"` - Roles []AWSRoleCredentialsApplyConfiguration `json:"roles,omitempty"` - KubeCloudControllerCreds *v1.LocalObjectReference `json:"kubeCloudControllerCreds,omitempty"` - NodePoolManagementCreds *v1.LocalObjectReference `json:"nodePoolManagementCreds,omitempty"` - ControlPlaneOperatorCreds *v1.LocalObjectReference `json:"controlPlaneOperatorCreds,omitempty"` - ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` - EndpointAccess *hypershiftv1alpha1.AWSEndpointAccessType `json:"endpointAccess,omitempty"` - AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` - MultiArch *bool `json:"multiArch,omitempty"` -} - -// AWSPlatformSpecApplyConfiguration constructs an declarative configuration of the AWSPlatformSpec type for use with -// apply. 
-func AWSPlatformSpec() *AWSPlatformSpecApplyConfiguration { - return &AWSPlatformSpecApplyConfiguration{} -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithRegion(value string) *AWSPlatformSpecApplyConfiguration { - b.Region = &value - return b -} - -// WithCloudProviderConfig sets the CloudProviderConfig field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CloudProviderConfig field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithCloudProviderConfig(value *AWSCloudProviderConfigApplyConfiguration) *AWSPlatformSpecApplyConfiguration { - b.CloudProviderConfig = value - return b -} - -// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field. -func (b *AWSPlatformSpecApplyConfiguration) WithServiceEndpoints(values ...*AWSServiceEndpointApplyConfiguration) *AWSPlatformSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithServiceEndpoints") - } - b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i]) - } - return b -} - -// WithRolesRef sets the RolesRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RolesRef field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithRolesRef(value *AWSRolesRefApplyConfiguration) *AWSPlatformSpecApplyConfiguration { - b.RolesRef = value - return b -} - -// WithRoles adds the given value to the Roles field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Roles field. -func (b *AWSPlatformSpecApplyConfiguration) WithRoles(values ...*AWSRoleCredentialsApplyConfiguration) *AWSPlatformSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRoles") - } - b.Roles = append(b.Roles, *values[i]) - } - return b -} - -// WithKubeCloudControllerCreds sets the KubeCloudControllerCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeCloudControllerCreds field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithKubeCloudControllerCreds(value v1.LocalObjectReference) *AWSPlatformSpecApplyConfiguration { - b.KubeCloudControllerCreds = &value - return b -} - -// WithNodePoolManagementCreds sets the NodePoolManagementCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the NodePoolManagementCreds field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithNodePoolManagementCreds(value v1.LocalObjectReference) *AWSPlatformSpecApplyConfiguration { - b.NodePoolManagementCreds = &value - return b -} - -// WithControlPlaneOperatorCreds sets the ControlPlaneOperatorCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ControlPlaneOperatorCreds field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithControlPlaneOperatorCreds(value v1.LocalObjectReference) *AWSPlatformSpecApplyConfiguration { - b.ControlPlaneOperatorCreds = &value - return b -} - -// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceTags field. -func (b *AWSPlatformSpecApplyConfiguration) WithResourceTags(values ...*AWSResourceTagApplyConfiguration) *AWSPlatformSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceTags") - } - b.ResourceTags = append(b.ResourceTags, *values[i]) - } - return b -} - -// WithEndpointAccess sets the EndpointAccess field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the EndpointAccess field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithEndpointAccess(value hypershiftv1alpha1.AWSEndpointAccessType) *AWSPlatformSpecApplyConfiguration { - b.EndpointAccess = &value - return b -} - -// WithAdditionalAllowedPrincipals adds the given value to the AdditionalAllowedPrincipals field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AdditionalAllowedPrincipals field. -func (b *AWSPlatformSpecApplyConfiguration) WithAdditionalAllowedPrincipals(values ...string) *AWSPlatformSpecApplyConfiguration { - for i := range values { - b.AdditionalAllowedPrincipals = append(b.AdditionalAllowedPrincipals, values[i]) - } - return b -} - -// WithMultiArch sets the MultiArch field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MultiArch field is set to the value of the last call. -func (b *AWSPlatformSpecApplyConfiguration) WithMultiArch(value bool) *AWSPlatformSpecApplyConfiguration { - b.MultiArch = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformstatus.go deleted file mode 100644 index 4a71a0d24..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsplatformstatus.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSPlatformStatusApplyConfiguration represents an declarative configuration of the AWSPlatformStatus type for use -// with apply. -type AWSPlatformStatusApplyConfiguration struct { - DefaultWorkerSecurityGroupID *string `json:"defaultWorkerSecurityGroupID,omitempty"` -} - -// AWSPlatformStatusApplyConfiguration constructs an declarative configuration of the AWSPlatformStatus type for use with -// apply. -func AWSPlatformStatus() *AWSPlatformStatusApplyConfiguration { - return &AWSPlatformStatusApplyConfiguration{} -} - -// WithDefaultWorkerSecurityGroupID sets the DefaultWorkerSecurityGroupID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultWorkerSecurityGroupID field is set to the value of the last call. -func (b *AWSPlatformStatusApplyConfiguration) WithDefaultWorkerSecurityGroupID(value string) *AWSPlatformStatusApplyConfiguration { - b.DefaultWorkerSecurityGroupID = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcereference.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcereference.go deleted file mode 100644 index f8b8558bc..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcereference.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSResourceReferenceApplyConfiguration represents an declarative configuration of the AWSResourceReference type for use -// with apply. -type AWSResourceReferenceApplyConfiguration struct { - ID *string `json:"id,omitempty"` - Filters []FilterApplyConfiguration `json:"filters,omitempty"` -} - -// AWSResourceReferenceApplyConfiguration constructs an declarative configuration of the AWSResourceReference type for use with -// apply. -func AWSResourceReference() *AWSResourceReferenceApplyConfiguration { - return &AWSResourceReferenceApplyConfiguration{} -} - -// WithID sets the ID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ID field is set to the value of the last call. 
-func (b *AWSResourceReferenceApplyConfiguration) WithID(value string) *AWSResourceReferenceApplyConfiguration { - b.ID = &value - return b -} - -// WithFilters adds the given value to the Filters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Filters field. -func (b *AWSResourceReferenceApplyConfiguration) WithFilters(values ...*FilterApplyConfiguration) *AWSResourceReferenceApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithFilters") - } - b.Filters = append(b.Filters, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcetag.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcetag.go deleted file mode 100644 index 1e5a3f98b..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsresourcetag.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSResourceTagApplyConfiguration represents an declarative configuration of the AWSResourceTag type for use -// with apply. -type AWSResourceTagApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` -} - -// AWSResourceTagApplyConfiguration constructs an declarative configuration of the AWSResourceTag type for use with -// apply. -func AWSResourceTag() *AWSResourceTagApplyConfiguration { - return &AWSResourceTagApplyConfiguration{} -} - -// WithKey sets the Key field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Key field is set to the value of the last call. -func (b *AWSResourceTagApplyConfiguration) WithKey(value string) *AWSResourceTagApplyConfiguration { - b.Key = &value - return b -} - -// WithValue sets the Value field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Value field is set to the value of the last call. 
-func (b *AWSResourceTagApplyConfiguration) WithValue(value string) *AWSResourceTagApplyConfiguration { - b.Value = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolecredentials.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolecredentials.go deleted file mode 100644 index f8c1a48bb..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolecredentials.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSRoleCredentialsApplyConfiguration represents an declarative configuration of the AWSRoleCredentials type for use -// with apply. -type AWSRoleCredentialsApplyConfiguration struct { - ARN *string `json:"arn,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Name *string `json:"name,omitempty"` -} - -// AWSRoleCredentialsApplyConfiguration constructs an declarative configuration of the AWSRoleCredentials type for use with -// apply. -func AWSRoleCredentials() *AWSRoleCredentialsApplyConfiguration { - return &AWSRoleCredentialsApplyConfiguration{} -} - -// WithARN sets the ARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ARN field is set to the value of the last call. -func (b *AWSRoleCredentialsApplyConfiguration) WithARN(value string) *AWSRoleCredentialsApplyConfiguration { - b.ARN = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *AWSRoleCredentialsApplyConfiguration) WithNamespace(value string) *AWSRoleCredentialsApplyConfiguration { - b.Namespace = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *AWSRoleCredentialsApplyConfiguration) WithName(value string) *AWSRoleCredentialsApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolesref.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolesref.go deleted file mode 100644 index 3630d7c19..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsrolesref.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSRolesRefApplyConfiguration represents an declarative configuration of the AWSRolesRef type for use -// with apply. -type AWSRolesRefApplyConfiguration struct { - IngressARN *string `json:"ingressARN,omitempty"` - ImageRegistryARN *string `json:"imageRegistryARN,omitempty"` - StorageARN *string `json:"storageARN,omitempty"` - NetworkARN *string `json:"networkARN,omitempty"` - KubeCloudControllerARN *string `json:"kubeCloudControllerARN,omitempty"` - NodePoolManagementARN *string `json:"nodePoolManagementARN,omitempty"` - ControlPlaneOperatorARN *string `json:"controlPlaneOperatorARN,omitempty"` -} - -// AWSRolesRefApplyConfiguration constructs an declarative configuration of the AWSRolesRef type for use with -// apply. -func AWSRolesRef() *AWSRolesRefApplyConfiguration { - return &AWSRolesRefApplyConfiguration{} -} - -// WithIngressARN sets the IngressARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IngressARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithIngressARN(value string) *AWSRolesRefApplyConfiguration { - b.IngressARN = &value - return b -} - -// WithImageRegistryARN sets the ImageRegistryARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ImageRegistryARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithImageRegistryARN(value string) *AWSRolesRefApplyConfiguration { - b.ImageRegistryARN = &value - return b -} - -// WithStorageARN sets the StorageARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithStorageARN(value string) *AWSRolesRefApplyConfiguration { - b.StorageARN = &value - return b -} - -// WithNetworkARN sets the NetworkARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the NetworkARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithNetworkARN(value string) *AWSRolesRefApplyConfiguration { - b.NetworkARN = &value - return b -} - -// WithKubeCloudControllerARN sets the KubeCloudControllerARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeCloudControllerARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithKubeCloudControllerARN(value string) *AWSRolesRefApplyConfiguration { - b.KubeCloudControllerARN = &value - return b -} - -// WithNodePoolManagementARN sets the NodePoolManagementARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodePoolManagementARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithNodePoolManagementARN(value string) *AWSRolesRefApplyConfiguration { - b.NodePoolManagementARN = &value - return b -} - -// WithControlPlaneOperatorARN sets the ControlPlaneOperatorARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ControlPlaneOperatorARN field is set to the value of the last call. -func (b *AWSRolesRefApplyConfiguration) WithControlPlaneOperatorARN(value string) *AWSRolesRefApplyConfiguration { - b.ControlPlaneOperatorARN = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsserviceendpoint.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsserviceendpoint.go deleted file mode 100644 index f4d3051b8..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/awsserviceendpoint.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AWSServiceEndpointApplyConfiguration represents an declarative configuration of the AWSServiceEndpoint type for use -// with apply. -type AWSServiceEndpointApplyConfiguration struct { - Name *string `json:"name,omitempty"` - URL *string `json:"url,omitempty"` -} - -// AWSServiceEndpointApplyConfiguration constructs an declarative configuration of the AWSServiceEndpoint type for use with -// apply. -func AWSServiceEndpoint() *AWSServiceEndpointApplyConfiguration { - return &AWSServiceEndpointApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *AWSServiceEndpointApplyConfiguration) WithName(value string) *AWSServiceEndpointApplyConfiguration { - b.Name = &value - return b -} - -// WithURL sets the URL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the URL field is set to the value of the last call. -func (b *AWSServiceEndpointApplyConfiguration) WithURL(value string) *AWSServiceEndpointApplyConfiguration { - b.URL = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmskey.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmskey.go deleted file mode 100644 index e10a5f8ce..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmskey.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AzureKMSKeyApplyConfiguration represents an declarative configuration of the AzureKMSKey type for use -// with apply. -type AzureKMSKeyApplyConfiguration struct { - KeyVaultName *string `json:"keyVaultName,omitempty"` - KeyName *string `json:"keyName,omitempty"` - KeyVersion *string `json:"keyVersion,omitempty"` -} - -// AzureKMSKeyApplyConfiguration constructs an declarative configuration of the AzureKMSKey type for use with -// apply. -func AzureKMSKey() *AzureKMSKeyApplyConfiguration { - return &AzureKMSKeyApplyConfiguration{} -} - -// WithKeyVaultName sets the KeyVaultName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KeyVaultName field is set to the value of the last call. -func (b *AzureKMSKeyApplyConfiguration) WithKeyVaultName(value string) *AzureKMSKeyApplyConfiguration { - b.KeyVaultName = &value - return b -} - -// WithKeyName sets the KeyName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KeyName field is set to the value of the last call. -func (b *AzureKMSKeyApplyConfiguration) WithKeyName(value string) *AzureKMSKeyApplyConfiguration { - b.KeyName = &value - return b -} - -// WithKeyVersion sets the KeyVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KeyVersion field is set to the value of the last call. 
-func (b *AzureKMSKeyApplyConfiguration) WithKeyVersion(value string) *AzureKMSKeyApplyConfiguration { - b.KeyVersion = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmsspec.go deleted file mode 100644 index fc1e27e35..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurekmsspec.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AzureKMSSpecApplyConfiguration represents an declarative configuration of the AzureKMSSpec type for use -// with apply. -type AzureKMSSpecApplyConfiguration struct { - ActiveKey *AzureKMSKeyApplyConfiguration `json:"activeKey,omitempty"` - BackupKey *AzureKMSKeyApplyConfiguration `json:"backupKey,omitempty"` -} - -// AzureKMSSpecApplyConfiguration constructs an declarative configuration of the AzureKMSSpec type for use with -// apply. -func AzureKMSSpec() *AzureKMSSpecApplyConfiguration { - return &AzureKMSSpecApplyConfiguration{} -} - -// WithActiveKey sets the ActiveKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ActiveKey field is set to the value of the last call. -func (b *AzureKMSSpecApplyConfiguration) WithActiveKey(value *AzureKMSKeyApplyConfiguration) *AzureKMSSpecApplyConfiguration { - b.ActiveKey = value - return b -} - -// WithBackupKey sets the BackupKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BackupKey field is set to the value of the last call. -func (b *AzureKMSSpecApplyConfiguration) WithBackupKey(value *AzureKMSKeyApplyConfiguration) *AzureKMSSpecApplyConfiguration { - b.BackupKey = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurenodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurenodepoolplatform.go deleted file mode 100644 index ec40ce430..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azurenodepoolplatform.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// AzureNodePoolPlatformApplyConfiguration represents an declarative configuration of the AzureNodePoolPlatform type for use -// with apply. -type AzureNodePoolPlatformApplyConfiguration struct { - VMSize *string `json:"vmsize,omitempty"` - ImageID *string `json:"imageID,omitempty"` - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - DiskStorageAccountType *string `json:"diskStorageAccountType,omitempty"` - AvailabilityZone *string `json:"availabilityZone,omitempty"` - DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` - EnableEphemeralOSDisk *bool `json:"enableEphemeralOSDisk,omitempty"` - SubnetID *string `json:"subnetID,omitempty"` - Diagnostics *DiagnosticsApplyConfiguration `json:"diagnostics,omitempty"` -} - -// AzureNodePoolPlatformApplyConfiguration constructs an declarative configuration of the AzureNodePoolPlatform type for use with -// apply. -func AzureNodePoolPlatform() *AzureNodePoolPlatformApplyConfiguration { - return &AzureNodePoolPlatformApplyConfiguration{} -} - -// WithVMSize sets the VMSize field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VMSize field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithVMSize(value string) *AzureNodePoolPlatformApplyConfiguration { - b.VMSize = &value - return b -} - -// WithImageID sets the ImageID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ImageID field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithImageID(value string) *AzureNodePoolPlatformApplyConfiguration { - b.ImageID = &value - return b -} - -// WithDiskSizeGB sets the DiskSizeGB field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskSizeGB field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskSizeGB(value int32) *AzureNodePoolPlatformApplyConfiguration { - b.DiskSizeGB = &value - return b -} - -// WithDiskStorageAccountType sets the DiskStorageAccountType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskStorageAccountType field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskStorageAccountType(value string) *AzureNodePoolPlatformApplyConfiguration { - b.DiskStorageAccountType = &value - return b -} - -// WithAvailabilityZone sets the AvailabilityZone field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AvailabilityZone field is set to the value of the last call. 
-func (b *AzureNodePoolPlatformApplyConfiguration) WithAvailabilityZone(value string) *AzureNodePoolPlatformApplyConfiguration { - b.AvailabilityZone = &value - return b -} - -// WithDiskEncryptionSetID sets the DiskEncryptionSetID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskEncryptionSetID field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskEncryptionSetID(value string) *AzureNodePoolPlatformApplyConfiguration { - b.DiskEncryptionSetID = &value - return b -} - -// WithEnableEphemeralOSDisk sets the EnableEphemeralOSDisk field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the EnableEphemeralOSDisk field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithEnableEphemeralOSDisk(value bool) *AzureNodePoolPlatformApplyConfiguration { - b.EnableEphemeralOSDisk = &value - return b -} - -// WithSubnetID sets the SubnetID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SubnetID field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithSubnetID(value string) *AzureNodePoolPlatformApplyConfiguration { - b.SubnetID = &value - return b -} - -// WithDiagnostics sets the Diagnostics field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Diagnostics field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiagnostics(value *DiagnosticsApplyConfiguration) *AzureNodePoolPlatformApplyConfiguration { - b.Diagnostics = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azureplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azureplatformspec.go deleted file mode 100644 index 1a6eed267..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/azureplatformspec.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// AzurePlatformSpecApplyConfiguration represents an declarative configuration of the AzurePlatformSpec type for use -// with apply. 
-type AzurePlatformSpecApplyConfiguration struct { - Credentials *v1.LocalObjectReference `json:"credentials,omitempty"` - Cloud *string `json:"cloud,omitempty"` - Location *string `json:"location,omitempty"` - ResourceGroupName *string `json:"resourceGroup,omitempty"` - VnetID *string `json:"vnetID,omitempty"` - SubnetID *string `json:"subnetID,omitempty"` - SubscriptionID *string `json:"subscriptionID,omitempty"` - MachineIdentityID *string `json:"machineIdentityID,omitempty"` - SecurityGroupID *string `json:"securityGroupID,omitempty"` -} - -// AzurePlatformSpecApplyConfiguration constructs an declarative configuration of the AzurePlatformSpec type for use with -// apply. -func AzurePlatformSpec() *AzurePlatformSpecApplyConfiguration { - return &AzurePlatformSpecApplyConfiguration{} -} - -// WithCredentials sets the Credentials field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Credentials field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithCredentials(value v1.LocalObjectReference) *AzurePlatformSpecApplyConfiguration { - b.Credentials = &value - return b -} - -// WithCloud sets the Cloud field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Cloud field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithCloud(value string) *AzurePlatformSpecApplyConfiguration { - b.Cloud = &value - return b -} - -// WithLocation sets the Location field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Location field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithLocation(value string) *AzurePlatformSpecApplyConfiguration { - b.Location = &value - return b -} - -// WithResourceGroupName sets the ResourceGroupName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceGroupName field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithResourceGroupName(value string) *AzurePlatformSpecApplyConfiguration { - b.ResourceGroupName = &value - return b -} - -// WithVnetID sets the VnetID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VnetID field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithVnetID(value string) *AzurePlatformSpecApplyConfiguration { - b.VnetID = &value - return b -} - -// WithSubnetID sets the SubnetID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SubnetID field is set to the value of the last call. 
-func (b *AzurePlatformSpecApplyConfiguration) WithSubnetID(value string) *AzurePlatformSpecApplyConfiguration { - b.SubnetID = &value - return b -} - -// WithSubscriptionID sets the SubscriptionID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SubscriptionID field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithSubscriptionID(value string) *AzurePlatformSpecApplyConfiguration { - b.SubscriptionID = &value - return b -} - -// WithMachineIdentityID sets the MachineIdentityID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MachineIdentityID field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithMachineIdentityID(value string) *AzurePlatformSpecApplyConfiguration { - b.MachineIdentityID = &value - return b -} - -// WithSecurityGroupID sets the SecurityGroupID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SecurityGroupID field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithSecurityGroupID(value string) *AzurePlatformSpecApplyConfiguration { - b.SecurityGroupID = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterautoscaling.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterautoscaling.go deleted file mode 100644 index 3c95e8701..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterautoscaling.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// ClusterAutoscalingApplyConfiguration represents an declarative configuration of the ClusterAutoscaling type for use -// with apply. -type ClusterAutoscalingApplyConfiguration struct { - MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"` - MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"` - MaxNodeProvisionTime *string `json:"maxNodeProvisionTime,omitempty"` - PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"` -} - -// ClusterAutoscalingApplyConfiguration constructs an declarative configuration of the ClusterAutoscaling type for use with -// apply. -func ClusterAutoscaling() *ClusterAutoscalingApplyConfiguration { - return &ClusterAutoscalingApplyConfiguration{} -} - -// WithMaxNodesTotal sets the MaxNodesTotal field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the MaxNodesTotal field is set to the value of the last call. -func (b *ClusterAutoscalingApplyConfiguration) WithMaxNodesTotal(value int32) *ClusterAutoscalingApplyConfiguration { - b.MaxNodesTotal = &value - return b -} - -// WithMaxPodGracePeriod sets the MaxPodGracePeriod field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MaxPodGracePeriod field is set to the value of the last call. -func (b *ClusterAutoscalingApplyConfiguration) WithMaxPodGracePeriod(value int32) *ClusterAutoscalingApplyConfiguration { - b.MaxPodGracePeriod = &value - return b -} - -// WithMaxNodeProvisionTime sets the MaxNodeProvisionTime field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MaxNodeProvisionTime field is set to the value of the last call. -func (b *ClusterAutoscalingApplyConfiguration) WithMaxNodeProvisionTime(value string) *ClusterAutoscalingApplyConfiguration { - b.MaxNodeProvisionTime = &value - return b -} - -// WithPodPriorityThreshold sets the PodPriorityThreshold field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PodPriorityThreshold field is set to the value of the last call. -func (b *ClusterAutoscalingApplyConfiguration) WithPodPriorityThreshold(value int32) *ClusterAutoscalingApplyConfiguration { - b.PodPriorityThreshold = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go deleted file mode 100644 index 4e0864527..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - configv1 "github.com/openshift/api/config/v1" - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ClusterConfigurationApplyConfiguration represents an declarative configuration of the ClusterConfiguration type for use -// with apply. 
-type ClusterConfigurationApplyConfiguration struct { - SecretRefs []v1.LocalObjectReference `json:"secretRefs,omitempty"` - ConfigMapRefs []v1.LocalObjectReference `json:"configMapRefs,omitempty"` - Items []runtime.RawExtension `json:"items,omitempty"` - APIServer *configv1.APIServerSpec `json:"apiServer,omitempty"` - Authentication *configv1.AuthenticationSpec `json:"authentication,omitempty"` - FeatureGate *configv1.FeatureGateSpec `json:"featureGate,omitempty"` - Image *configv1.ImageSpec `json:"image,omitempty"` - Ingress *configv1.IngressSpec `json:"ingress,omitempty"` - Network *configv1.NetworkSpec `json:"network,omitempty"` - OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` - OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"` - Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"` - Proxy *configv1.ProxySpec `json:"proxy,omitempty"` -} - -// ClusterConfigurationApplyConfiguration constructs an declarative configuration of the ClusterConfiguration type for use with -// apply. -func ClusterConfiguration() *ClusterConfigurationApplyConfiguration { - return &ClusterConfigurationApplyConfiguration{} -} - -// WithSecretRefs adds the given value to the SecretRefs field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the SecretRefs field. -func (b *ClusterConfigurationApplyConfiguration) WithSecretRefs(values ...v1.LocalObjectReference) *ClusterConfigurationApplyConfiguration { - for i := range values { - b.SecretRefs = append(b.SecretRefs, values[i]) - } - return b -} - -// WithConfigMapRefs adds the given value to the ConfigMapRefs field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ConfigMapRefs field. -func (b *ClusterConfigurationApplyConfiguration) WithConfigMapRefs(values ...v1.LocalObjectReference) *ClusterConfigurationApplyConfiguration { - for i := range values { - b.ConfigMapRefs = append(b.ConfigMapRefs, values[i]) - } - return b -} - -// WithItems adds the given value to the Items field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Items field. -func (b *ClusterConfigurationApplyConfiguration) WithItems(values ...runtime.RawExtension) *ClusterConfigurationApplyConfiguration { - for i := range values { - b.Items = append(b.Items, values[i]) - } - return b -} - -// WithAPIServer sets the APIServer field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIServer field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithAPIServer(value configv1.APIServerSpec) *ClusterConfigurationApplyConfiguration { - b.APIServer = &value - return b -} - -// WithAuthentication sets the Authentication field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Authentication field is set to the value of the last call. 
-func (b *ClusterConfigurationApplyConfiguration) WithAuthentication(value configv1.AuthenticationSpec) *ClusterConfigurationApplyConfiguration { - b.Authentication = &value - return b -} - -// WithFeatureGate sets the FeatureGate field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FeatureGate field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithFeatureGate(value configv1.FeatureGateSpec) *ClusterConfigurationApplyConfiguration { - b.FeatureGate = &value - return b -} - -// WithImage sets the Image field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Image field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithImage(value configv1.ImageSpec) *ClusterConfigurationApplyConfiguration { - b.Image = &value - return b -} - -// WithIngress sets the Ingress field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Ingress field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithIngress(value configv1.IngressSpec) *ClusterConfigurationApplyConfiguration { - b.Ingress = &value - return b -} - -// WithNetwork sets the Network field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Network field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithNetwork(value configv1.NetworkSpec) *ClusterConfigurationApplyConfiguration { - b.Network = &value - return b -} - -// WithOAuth sets the OAuth field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the OAuth field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithOAuth(value configv1.OAuthSpec) *ClusterConfigurationApplyConfiguration { - b.OAuth = &value - return b -} - -// WithOperatorHub sets the OperatorHub field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the OperatorHub field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithOperatorHub(value configv1.OperatorHubSpec) *ClusterConfigurationApplyConfiguration { - b.OperatorHub = &value - return b -} - -// WithScheduler sets the Scheduler field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scheduler field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithScheduler(value configv1.SchedulerSpec) *ClusterConfigurationApplyConfiguration { - b.Scheduler = &value - return b -} - -// WithProxy sets the Proxy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Proxy field is set to the value of the last call. -func (b *ClusterConfigurationApplyConfiguration) WithProxy(value configv1.ProxySpec) *ClusterConfigurationApplyConfiguration { - b.Proxy = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworkentry.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworkentry.go deleted file mode 100644 index 9b807c762..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworkentry.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - ipnet "github.com/openshift/hypershift/api/util/ipnet" -) - -// ClusterNetworkEntryApplyConfiguration represents an declarative configuration of the ClusterNetworkEntry type for use -// with apply. -type ClusterNetworkEntryApplyConfiguration struct { - CIDR *ipnet.IPNet `json:"cidr,omitempty"` - HostPrefix *int32 `json:"hostPrefix,omitempty"` -} - -// ClusterNetworkEntryApplyConfiguration constructs an declarative configuration of the ClusterNetworkEntry type for use with -// apply. -func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration { - return &ClusterNetworkEntryApplyConfiguration{} -} - -// WithCIDR sets the CIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CIDR field is set to the value of the last call. -func (b *ClusterNetworkEntryApplyConfiguration) WithCIDR(value ipnet.IPNet) *ClusterNetworkEntryApplyConfiguration { - b.CIDR = &value - return b -} - -// WithHostPrefix sets the HostPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPrefix field is set to the value of the last call. -func (b *ClusterNetworkEntryApplyConfiguration) WithHostPrefix(value int32) *ClusterNetworkEntryApplyConfiguration { - b.HostPrefix = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworking.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworking.go deleted file mode 100644 index 2e7d3ed9d..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusternetworking.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// ClusterNetworkingApplyConfiguration represents an declarative configuration of the ClusterNetworking type for use -// with apply. -type ClusterNetworkingApplyConfiguration struct { - ServiceCIDR *string `json:"serviceCIDR,omitempty"` - PodCIDR *string `json:"podCIDR,omitempty"` - MachineCIDR *string `json:"machineCIDR,omitempty"` - MachineNetwork []MachineNetworkEntryApplyConfiguration `json:"machineNetwork,omitempty"` - ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"` - ServiceNetwork []ServiceNetworkEntryApplyConfiguration `json:"serviceNetwork,omitempty"` - NetworkType *hypershiftv1alpha1.NetworkType `json:"networkType,omitempty"` - APIServer *APIServerNetworkingApplyConfiguration `json:"apiServer,omitempty"` -} - -// ClusterNetworkingApplyConfiguration constructs an declarative configuration of the ClusterNetworking type for use with -// apply. -func ClusterNetworking() *ClusterNetworkingApplyConfiguration { - return &ClusterNetworkingApplyConfiguration{} -} - -// WithServiceCIDR sets the ServiceCIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ServiceCIDR field is set to the value of the last call. -func (b *ClusterNetworkingApplyConfiguration) WithServiceCIDR(value string) *ClusterNetworkingApplyConfiguration { - b.ServiceCIDR = &value - return b -} - -// WithPodCIDR sets the PodCIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PodCIDR field is set to the value of the last call. -func (b *ClusterNetworkingApplyConfiguration) WithPodCIDR(value string) *ClusterNetworkingApplyConfiguration { - b.PodCIDR = &value - return b -} - -// WithMachineCIDR sets the MachineCIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MachineCIDR field is set to the value of the last call. -func (b *ClusterNetworkingApplyConfiguration) WithMachineCIDR(value string) *ClusterNetworkingApplyConfiguration { - b.MachineCIDR = &value - return b -} - -// WithMachineNetwork adds the given value to the MachineNetwork field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the MachineNetwork field. 
-func (b *ClusterNetworkingApplyConfiguration) WithMachineNetwork(values ...*MachineNetworkEntryApplyConfiguration) *ClusterNetworkingApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithMachineNetwork") - } - b.MachineNetwork = append(b.MachineNetwork, *values[i]) - } - return b -} - -// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ClusterNetwork field. -func (b *ClusterNetworkingApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *ClusterNetworkingApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithClusterNetwork") - } - b.ClusterNetwork = append(b.ClusterNetwork, *values[i]) - } - return b -} - -// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ServiceNetwork field. -func (b *ClusterNetworkingApplyConfiguration) WithServiceNetwork(values ...*ServiceNetworkEntryApplyConfiguration) *ClusterNetworkingApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithServiceNetwork") - } - b.ServiceNetwork = append(b.ServiceNetwork, *values[i]) - } - return b -} - -// WithNetworkType sets the NetworkType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NetworkType field is set to the value of the last call. -func (b *ClusterNetworkingApplyConfiguration) WithNetworkType(value hypershiftv1alpha1.NetworkType) *ClusterNetworkingApplyConfiguration { - b.NetworkType = &value - return b -} - -// WithAPIServer sets the APIServer field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIServer field is set to the value of the last call. -func (b *ClusterNetworkingApplyConfiguration) WithAPIServer(value *APIServerNetworkingApplyConfiguration) *ClusterNetworkingApplyConfiguration { - b.APIServer = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterversionstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterversionstatus.go deleted file mode 100644 index faa3f8d6a..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/clusterversionstatus.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "github.com/openshift/api/config/v1" -) - -// ClusterVersionStatusApplyConfiguration represents an declarative configuration of the ClusterVersionStatus type for use -// with apply. -type ClusterVersionStatusApplyConfiguration struct { - Desired *v1.Release `json:"desired,omitempty"` - History []v1.UpdateHistory `json:"history,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - AvailableUpdates []v1.Release `json:"availableUpdates,omitempty"` - ConditionalUpdates []v1.ConditionalUpdate `json:"conditionalUpdates,omitempty"` -} - -// ClusterVersionStatusApplyConfiguration constructs an declarative configuration of the ClusterVersionStatus type for use with -// apply. -func ClusterVersionStatus() *ClusterVersionStatusApplyConfiguration { - return &ClusterVersionStatusApplyConfiguration{} -} - -// WithDesired sets the Desired field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Desired field is set to the value of the last call. -func (b *ClusterVersionStatusApplyConfiguration) WithDesired(value v1.Release) *ClusterVersionStatusApplyConfiguration { - b.Desired = &value - return b -} - -// WithHistory adds the given value to the History field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the History field. -func (b *ClusterVersionStatusApplyConfiguration) WithHistory(values ...v1.UpdateHistory) *ClusterVersionStatusApplyConfiguration { - for i := range values { - b.History = append(b.History, values[i]) - } - return b -} - -// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ObservedGeneration field is set to the value of the last call. -func (b *ClusterVersionStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterVersionStatusApplyConfiguration { - b.ObservedGeneration = &value - return b -} - -// WithAvailableUpdates adds the given value to the AvailableUpdates field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AvailableUpdates field. -func (b *ClusterVersionStatusApplyConfiguration) WithAvailableUpdates(values ...v1.Release) *ClusterVersionStatusApplyConfiguration { - for i := range values { - b.AvailableUpdates = append(b.AvailableUpdates, values[i]) - } - return b -} - -// WithConditionalUpdates adds the given value to the ConditionalUpdates field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ConditionalUpdates field. 
-func (b *ClusterVersionStatusApplyConfiguration) WithConditionalUpdates(values ...v1.ConditionalUpdate) *ClusterVersionStatusApplyConfiguration { - for i := range values { - b.ConditionalUpdates = append(b.ConditionalUpdates, values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/diagnostics.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/diagnostics.go deleted file mode 100644 index c0f5b9ec2..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/diagnostics.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// DiagnosticsApplyConfiguration represents an declarative configuration of the Diagnostics type for use -// with apply. -type DiagnosticsApplyConfiguration struct { - StorageAccountType *string `json:"storageAccountType,omitempty"` - StorageAccountURI *string `json:"storageAccountURI,omitempty"` -} - -// DiagnosticsApplyConfiguration constructs an declarative configuration of the Diagnostics type for use with -// apply. -func Diagnostics() *DiagnosticsApplyConfiguration { - return &DiagnosticsApplyConfiguration{} -} - -// WithStorageAccountType sets the StorageAccountType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageAccountType field is set to the value of the last call. -func (b *DiagnosticsApplyConfiguration) WithStorageAccountType(value string) *DiagnosticsApplyConfiguration { - b.StorageAccountType = &value - return b -} - -// WithStorageAccountURI sets the StorageAccountURI field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageAccountURI field is set to the value of the last call. -func (b *DiagnosticsApplyConfiguration) WithStorageAccountURI(value string) *DiagnosticsApplyConfiguration { - b.StorageAccountURI = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/dnsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/dnsspec.go deleted file mode 100644 index e42ef614c..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/dnsspec.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// DNSSpecApplyConfiguration represents an declarative configuration of the DNSSpec type for use -// with apply. -type DNSSpecApplyConfiguration struct { - BaseDomain *string `json:"baseDomain,omitempty"` - BaseDomainPrefix *string `json:"baseDomainPrefix,omitempty"` - PublicZoneID *string `json:"publicZoneID,omitempty"` - PrivateZoneID *string `json:"privateZoneID,omitempty"` -} - -// DNSSpecApplyConfiguration constructs an declarative configuration of the DNSSpec type for use with -// apply. -func DNSSpec() *DNSSpecApplyConfiguration { - return &DNSSpecApplyConfiguration{} -} - -// WithBaseDomain sets the BaseDomain field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BaseDomain field is set to the value of the last call. -func (b *DNSSpecApplyConfiguration) WithBaseDomain(value string) *DNSSpecApplyConfiguration { - b.BaseDomain = &value - return b -} - -// WithBaseDomainPrefix sets the BaseDomainPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BaseDomainPrefix field is set to the value of the last call. -func (b *DNSSpecApplyConfiguration) WithBaseDomainPrefix(value string) *DNSSpecApplyConfiguration { - b.BaseDomainPrefix = &value - return b -} - -// WithPublicZoneID sets the PublicZoneID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PublicZoneID field is set to the value of the last call. -func (b *DNSSpecApplyConfiguration) WithPublicZoneID(value string) *DNSSpecApplyConfiguration { - b.PublicZoneID = &value - return b -} - -// WithPrivateZoneID sets the PrivateZoneID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PrivateZoneID field is set to the value of the last call. -func (b *DNSSpecApplyConfiguration) WithPrivateZoneID(value string) *DNSSpecApplyConfiguration { - b.PrivateZoneID = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdspec.go deleted file mode 100644 index 55981cc5d..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdspec.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// EtcdSpecApplyConfiguration represents an declarative configuration of the EtcdSpec type for use -// with apply. -type EtcdSpecApplyConfiguration struct { - ManagementType *v1alpha1.EtcdManagementType `json:"managementType,omitempty"` - Managed *ManagedEtcdSpecApplyConfiguration `json:"managed,omitempty"` - Unmanaged *UnmanagedEtcdSpecApplyConfiguration `json:"unmanaged,omitempty"` -} - -// EtcdSpecApplyConfiguration constructs an declarative configuration of the EtcdSpec type for use with -// apply. -func EtcdSpec() *EtcdSpecApplyConfiguration { - return &EtcdSpecApplyConfiguration{} -} - -// WithManagementType sets the ManagementType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ManagementType field is set to the value of the last call. -func (b *EtcdSpecApplyConfiguration) WithManagementType(value v1alpha1.EtcdManagementType) *EtcdSpecApplyConfiguration { - b.ManagementType = &value - return b -} - -// WithManaged sets the Managed field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Managed field is set to the value of the last call. -func (b *EtcdSpecApplyConfiguration) WithManaged(value *ManagedEtcdSpecApplyConfiguration) *EtcdSpecApplyConfiguration { - b.Managed = value - return b -} - -// WithUnmanaged sets the Unmanaged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Unmanaged field is set to the value of the last call. -func (b *EtcdSpecApplyConfiguration) WithUnmanaged(value *UnmanagedEtcdSpecApplyConfiguration) *EtcdSpecApplyConfiguration { - b.Unmanaged = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdtlsconfig.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdtlsconfig.go deleted file mode 100644 index 2a0b7ab50..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/etcdtlsconfig.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// EtcdTLSConfigApplyConfiguration represents an declarative configuration of the EtcdTLSConfig type for use -// with apply. -type EtcdTLSConfigApplyConfiguration struct { - ClientSecret *v1.LocalObjectReference `json:"clientSecret,omitempty"` -} - -// EtcdTLSConfigApplyConfiguration constructs an declarative configuration of the EtcdTLSConfig type for use with -// apply. -func EtcdTLSConfig() *EtcdTLSConfigApplyConfiguration { - return &EtcdTLSConfigApplyConfiguration{} -} - -// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClientSecret field is set to the value of the last call. -func (b *EtcdTLSConfigApplyConfiguration) WithClientSecret(value v1.LocalObjectReference) *EtcdTLSConfigApplyConfiguration { - b.ClientSecret = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/filter.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/filter.go deleted file mode 100644 index 08ff68ed2..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/filter.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// FilterApplyConfiguration represents an declarative configuration of the Filter type for use -// with apply. -type FilterApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Values []string `json:"values,omitempty"` -} - -// FilterApplyConfiguration constructs an declarative configuration of the Filter type for use with -// apply. -func Filter() *FilterApplyConfiguration { - return &FilterApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *FilterApplyConfiguration) WithName(value string) *FilterApplyConfiguration { - b.Name = &value - return b -} - -// WithValues adds the given value to the Values field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Values field. 
-func (b *FilterApplyConfiguration) WithValues(values ...string) *FilterApplyConfiguration { - for i := range values { - b.Values = append(b.Values, values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedcluster.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedcluster.go deleted file mode 100644 index 69f9d5dc6..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedcluster.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// HostedClusterApplyConfiguration represents an declarative configuration of the HostedCluster type for use -// with apply. -type HostedClusterApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *HostedClusterSpecApplyConfiguration `json:"spec,omitempty"` - Status *HostedClusterStatusApplyConfiguration `json:"status,omitempty"` -} - -// HostedCluster constructs an declarative configuration of the HostedCluster type for use with -// apply. -func HostedCluster(name, namespace string) *HostedClusterApplyConfiguration { - b := &HostedClusterApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("HostedCluster") - b.WithAPIVersion("hypershift.openshift.io/v1alpha1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithKind(value string) *HostedClusterApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithAPIVersion(value string) *HostedClusterApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *HostedClusterApplyConfiguration) WithName(value string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithGenerateName(value string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithNamespace(value string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithUID(value types.UID) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithResourceVersion(value string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithGeneration(value int64) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *HostedClusterApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *HostedClusterApplyConfiguration) WithLabels(entries map[string]string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *HostedClusterApplyConfiguration) WithAnnotations(entries map[string]string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *HostedClusterApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *HostedClusterApplyConfiguration) WithFinalizers(values ...string) *HostedClusterApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *HostedClusterApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithSpec(value *HostedClusterSpecApplyConfiguration) *HostedClusterApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *HostedClusterApplyConfiguration) WithStatus(value *HostedClusterStatusApplyConfiguration) *HostedClusterApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterspec.go deleted file mode 100644 index cafa1f357..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterspec.go +++ /dev/null @@ -1,294 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "github.com/openshift/api/config/v1" - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -// HostedClusterSpecApplyConfiguration represents an declarative configuration of the HostedClusterSpec type for use -// with apply. 
-type HostedClusterSpecApplyConfiguration struct { - Release *ReleaseApplyConfiguration `json:"release,omitempty"` - ControlPlaneRelease *ReleaseApplyConfiguration `json:"controlPlaneRelease,omitempty"` - ClusterID *string `json:"clusterID,omitempty"` - UpdateService *v1.URL `json:"updateService,omitempty"` - Channel *string `json:"channel,omitempty"` - InfraID *string `json:"infraID,omitempty"` - Platform *PlatformSpecApplyConfiguration `json:"platform,omitempty"` - ControllerAvailabilityPolicy *hypershiftv1alpha1.AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` - InfrastructureAvailabilityPolicy *hypershiftv1alpha1.AvailabilityPolicy `json:"infrastructureAvailabilityPolicy,omitempty"` - DNS *DNSSpecApplyConfiguration `json:"dns,omitempty"` - Networking *ClusterNetworkingApplyConfiguration `json:"networking,omitempty"` - Autoscaling *ClusterAutoscalingApplyConfiguration `json:"autoscaling,omitempty"` - Etcd *EtcdSpecApplyConfiguration `json:"etcd,omitempty"` - Services []ServicePublishingStrategyMappingApplyConfiguration `json:"services,omitempty"` - PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty"` - SSHKey *corev1.LocalObjectReference `json:"sshKey,omitempty"` - IssuerURL *string `json:"issuerURL,omitempty"` - ServiceAccountSigningKey *corev1.LocalObjectReference `json:"serviceAccountSigningKey,omitempty"` - Configuration *ClusterConfigurationApplyConfiguration `json:"configuration,omitempty"` - AuditWebhook *corev1.LocalObjectReference `json:"auditWebhook,omitempty"` - ImageContentSources []ImageContentSourceApplyConfiguration `json:"imageContentSources,omitempty"` - AdditionalTrustBundle *corev1.LocalObjectReference `json:"additionalTrustBundle,omitempty"` - SecretEncryption *SecretEncryptionSpecApplyConfiguration `json:"secretEncryption,omitempty"` - FIPS *bool `json:"fips,omitempty"` - PausedUntil *string `json:"pausedUntil,omitempty"` - OLMCatalogPlacement *hypershiftv1alpha1.OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` -} - -// HostedClusterSpecApplyConfiguration constructs an declarative configuration of the HostedClusterSpec type for use with -// apply. -func HostedClusterSpec() *HostedClusterSpecApplyConfiguration { - return &HostedClusterSpecApplyConfiguration{} -} - -// WithRelease sets the Release field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Release field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithRelease(value *ReleaseApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Release = value - return b -} - -// WithControlPlaneRelease sets the ControlPlaneRelease field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ControlPlaneRelease field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithControlPlaneRelease(value *ReleaseApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.ControlPlaneRelease = value - return b -} - -// WithClusterID sets the ClusterID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the ClusterID field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithClusterID(value string) *HostedClusterSpecApplyConfiguration { - b.ClusterID = &value - return b -} - -// WithUpdateService sets the UpdateService field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UpdateService field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithUpdateService(value v1.URL) *HostedClusterSpecApplyConfiguration { - b.UpdateService = &value - return b -} - -// WithChannel sets the Channel field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Channel field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithChannel(value string) *HostedClusterSpecApplyConfiguration { - b.Channel = &value - return b -} - -// WithInfraID sets the InfraID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfraID field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithInfraID(value string) *HostedClusterSpecApplyConfiguration { - b.InfraID = &value - return b -} - -// WithPlatform sets the Platform field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Platform field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithPlatform(value *PlatformSpecApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Platform = value - return b -} - -// WithControllerAvailabilityPolicy sets the ControllerAvailabilityPolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ControllerAvailabilityPolicy field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithControllerAvailabilityPolicy(value hypershiftv1alpha1.AvailabilityPolicy) *HostedClusterSpecApplyConfiguration { - b.ControllerAvailabilityPolicy = &value - return b -} - -// WithInfrastructureAvailabilityPolicy sets the InfrastructureAvailabilityPolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfrastructureAvailabilityPolicy field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithInfrastructureAvailabilityPolicy(value hypershiftv1alpha1.AvailabilityPolicy) *HostedClusterSpecApplyConfiguration { - b.InfrastructureAvailabilityPolicy = &value - return b -} - -// WithDNS sets the DNS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DNS field is set to the value of the last call. 
-func (b *HostedClusterSpecApplyConfiguration) WithDNS(value *DNSSpecApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.DNS = value - return b -} - -// WithNetworking sets the Networking field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Networking field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithNetworking(value *ClusterNetworkingApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Networking = value - return b -} - -// WithAutoscaling sets the Autoscaling field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Autoscaling field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithAutoscaling(value *ClusterAutoscalingApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Autoscaling = value - return b -} - -// WithEtcd sets the Etcd field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Etcd field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithEtcd(value *EtcdSpecApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Etcd = value - return b -} - -// WithServices adds the given value to the Services field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Services field. -func (b *HostedClusterSpecApplyConfiguration) WithServices(values ...*ServicePublishingStrategyMappingApplyConfiguration) *HostedClusterSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithServices") - } - b.Services = append(b.Services, *values[i]) - } - return b -} - -// WithPullSecret sets the PullSecret field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PullSecret field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithPullSecret(value corev1.LocalObjectReference) *HostedClusterSpecApplyConfiguration { - b.PullSecret = &value - return b -} - -// WithSSHKey sets the SSHKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SSHKey field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithSSHKey(value corev1.LocalObjectReference) *HostedClusterSpecApplyConfiguration { - b.SSHKey = &value - return b -} - -// WithIssuerURL sets the IssuerURL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IssuerURL field is set to the value of the last call. 
-func (b *HostedClusterSpecApplyConfiguration) WithIssuerURL(value string) *HostedClusterSpecApplyConfiguration { - b.IssuerURL = &value - return b -} - -// WithServiceAccountSigningKey sets the ServiceAccountSigningKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ServiceAccountSigningKey field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithServiceAccountSigningKey(value corev1.LocalObjectReference) *HostedClusterSpecApplyConfiguration { - b.ServiceAccountSigningKey = &value - return b -} - -// WithConfiguration sets the Configuration field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Configuration field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithConfiguration(value *ClusterConfigurationApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.Configuration = value - return b -} - -// WithAuditWebhook sets the AuditWebhook field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AuditWebhook field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithAuditWebhook(value corev1.LocalObjectReference) *HostedClusterSpecApplyConfiguration { - b.AuditWebhook = &value - return b -} - -// WithImageContentSources adds the given value to the ImageContentSources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ImageContentSources field. -func (b *HostedClusterSpecApplyConfiguration) WithImageContentSources(values ...*ImageContentSourceApplyConfiguration) *HostedClusterSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithImageContentSources") - } - b.ImageContentSources = append(b.ImageContentSources, *values[i]) - } - return b -} - -// WithAdditionalTrustBundle sets the AdditionalTrustBundle field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AdditionalTrustBundle field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithAdditionalTrustBundle(value corev1.LocalObjectReference) *HostedClusterSpecApplyConfiguration { - b.AdditionalTrustBundle = &value - return b -} - -// WithSecretEncryption sets the SecretEncryption field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SecretEncryption field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithSecretEncryption(value *SecretEncryptionSpecApplyConfiguration) *HostedClusterSpecApplyConfiguration { - b.SecretEncryption = value - return b -} - -// WithFIPS sets the FIPS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the FIPS field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithFIPS(value bool) *HostedClusterSpecApplyConfiguration { - b.FIPS = &value - return b -} - -// WithPausedUntil sets the PausedUntil field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PausedUntil field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithPausedUntil(value string) *HostedClusterSpecApplyConfiguration { - b.PausedUntil = &value - return b -} - -// WithOLMCatalogPlacement sets the OLMCatalogPlacement field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the OLMCatalogPlacement field is set to the value of the last call. -func (b *HostedClusterSpecApplyConfiguration) WithOLMCatalogPlacement(value hypershiftv1alpha1.OLMCatalogPlacement) *HostedClusterSpecApplyConfiguration { - b.OLMCatalogPlacement = &value - return b -} - -// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the NodeSelector field, -// overwriting an existing map entries in NodeSelector field with the same key. -func (b *HostedClusterSpecApplyConfiguration) WithNodeSelector(entries map[string]string) *HostedClusterSpecApplyConfiguration { - if b.NodeSelector == nil && len(entries) > 0 { - b.NodeSelector = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.NodeSelector[k] = v - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go deleted file mode 100644 index d5d2565c5..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// HostedClusterStatusApplyConfiguration represents an declarative configuration of the HostedClusterStatus type for use -// with apply. 
-type HostedClusterStatusApplyConfiguration struct { - Version *ClusterVersionStatusApplyConfiguration `json:"version,omitempty"` - KubeConfig *v1.LocalObjectReference `json:"kubeconfig,omitempty"` - KubeadminPassword *v1.LocalObjectReference `json:"kubeadminPassword,omitempty"` - IgnitionEndpoint *string `json:"ignitionEndpoint,omitempty"` - ControlPlaneEndpoint *APIEndpointApplyConfiguration `json:"controlPlaneEndpoint,omitempty"` - OAuthCallbackURLTemplate *string `json:"oauthCallbackURLTemplate,omitempty"` - Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` - Platform *PlatformStatusApplyConfiguration `json:"platform,omitempty"` -} - -// HostedClusterStatusApplyConfiguration constructs an declarative configuration of the HostedClusterStatus type for use with -// apply. -func HostedClusterStatus() *HostedClusterStatusApplyConfiguration { - return &HostedClusterStatusApplyConfiguration{} -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithVersion(value *ClusterVersionStatusApplyConfiguration) *HostedClusterStatusApplyConfiguration { - b.Version = value - return b -} - -// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeConfig field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithKubeConfig(value v1.LocalObjectReference) *HostedClusterStatusApplyConfiguration { - b.KubeConfig = &value - return b -} - -// WithKubeadminPassword sets the KubeadminPassword field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeadminPassword field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithKubeadminPassword(value v1.LocalObjectReference) *HostedClusterStatusApplyConfiguration { - b.KubeadminPassword = &value - return b -} - -// WithIgnitionEndpoint sets the IgnitionEndpoint field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IgnitionEndpoint field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithIgnitionEndpoint(value string) *HostedClusterStatusApplyConfiguration { - b.IgnitionEndpoint = &value - return b -} - -// WithControlPlaneEndpoint sets the ControlPlaneEndpoint field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ControlPlaneEndpoint field is set to the value of the last call. 
-func (b *HostedClusterStatusApplyConfiguration) WithControlPlaneEndpoint(value *APIEndpointApplyConfiguration) *HostedClusterStatusApplyConfiguration { - b.ControlPlaneEndpoint = value - return b -} - -// WithOAuthCallbackURLTemplate sets the OAuthCallbackURLTemplate field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the OAuthCallbackURLTemplate field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithOAuthCallbackURLTemplate(value string) *HostedClusterStatusApplyConfiguration { - b.OAuthCallbackURLTemplate = &value - return b -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *HostedClusterStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithConditions") - } - b.Conditions = append(b.Conditions, *values[i]) - } - return b -} - -// WithPlatform sets the Platform field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Platform field is set to the value of the last call. -func (b *HostedClusterStatusApplyConfiguration) WithPlatform(value *PlatformStatusApplyConfiguration) *HostedClusterStatusApplyConfiguration { - b.Platform = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsauthspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsauthspec.go deleted file mode 100644 index ddaf5c5b1..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsauthspec.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// IBMCloudKMSAuthSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSAuthSpec type for use -// with apply. -type IBMCloudKMSAuthSpecApplyConfiguration struct { - Type *v1alpha1.IBMCloudKMSAuthType `json:"type,omitempty"` - Unmanaged *IBMCloudKMSUnmanagedAuthSpecApplyConfiguration `json:"unmanaged,omitempty"` - Managed *v1alpha1.IBMCloudKMSManagedAuthSpec `json:"managed,omitempty"` -} - -// IBMCloudKMSAuthSpecApplyConfiguration constructs an declarative configuration of the IBMCloudKMSAuthSpec type for use with -// apply. 
-func IBMCloudKMSAuthSpec() *IBMCloudKMSAuthSpecApplyConfiguration { - return &IBMCloudKMSAuthSpecApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *IBMCloudKMSAuthSpecApplyConfiguration) WithType(value v1alpha1.IBMCloudKMSAuthType) *IBMCloudKMSAuthSpecApplyConfiguration { - b.Type = &value - return b -} - -// WithUnmanaged sets the Unmanaged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Unmanaged field is set to the value of the last call. -func (b *IBMCloudKMSAuthSpecApplyConfiguration) WithUnmanaged(value *IBMCloudKMSUnmanagedAuthSpecApplyConfiguration) *IBMCloudKMSAuthSpecApplyConfiguration { - b.Unmanaged = value - return b -} - -// WithManaged sets the Managed field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Managed field is set to the value of the last call. -func (b *IBMCloudKMSAuthSpecApplyConfiguration) WithManaged(value v1alpha1.IBMCloudKMSManagedAuthSpec) *IBMCloudKMSAuthSpecApplyConfiguration { - b.Managed = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmskeyentry.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmskeyentry.go deleted file mode 100644 index a1c568049..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmskeyentry.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// IBMCloudKMSKeyEntryApplyConfiguration represents an declarative configuration of the IBMCloudKMSKeyEntry type for use -// with apply. -type IBMCloudKMSKeyEntryApplyConfiguration struct { - CRKID *string `json:"crkID,omitempty"` - InstanceID *string `json:"instanceID,omitempty"` - CorrelationID *string `json:"correlationID,omitempty"` - URL *string `json:"url,omitempty"` - KeyVersion *int `json:"keyVersion,omitempty"` -} - -// IBMCloudKMSKeyEntryApplyConfiguration constructs an declarative configuration of the IBMCloudKMSKeyEntry type for use with -// apply. -func IBMCloudKMSKeyEntry() *IBMCloudKMSKeyEntryApplyConfiguration { - return &IBMCloudKMSKeyEntryApplyConfiguration{} -} - -// WithCRKID sets the CRKID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CRKID field is set to the value of the last call. 
-func (b *IBMCloudKMSKeyEntryApplyConfiguration) WithCRKID(value string) *IBMCloudKMSKeyEntryApplyConfiguration { - b.CRKID = &value - return b -} - -// WithInstanceID sets the InstanceID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InstanceID field is set to the value of the last call. -func (b *IBMCloudKMSKeyEntryApplyConfiguration) WithInstanceID(value string) *IBMCloudKMSKeyEntryApplyConfiguration { - b.InstanceID = &value - return b -} - -// WithCorrelationID sets the CorrelationID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CorrelationID field is set to the value of the last call. -func (b *IBMCloudKMSKeyEntryApplyConfiguration) WithCorrelationID(value string) *IBMCloudKMSKeyEntryApplyConfiguration { - b.CorrelationID = &value - return b -} - -// WithURL sets the URL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the URL field is set to the value of the last call. -func (b *IBMCloudKMSKeyEntryApplyConfiguration) WithURL(value string) *IBMCloudKMSKeyEntryApplyConfiguration { - b.URL = &value - return b -} - -// WithKeyVersion sets the KeyVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KeyVersion field is set to the value of the last call. -func (b *IBMCloudKMSKeyEntryApplyConfiguration) WithKeyVersion(value int) *IBMCloudKMSKeyEntryApplyConfiguration { - b.KeyVersion = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsspec.go deleted file mode 100644 index 1a53d5006..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsspec.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// IBMCloudKMSSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSSpec type for use -// with apply. -type IBMCloudKMSSpecApplyConfiguration struct { - Region *string `json:"region,omitempty"` - Auth *IBMCloudKMSAuthSpecApplyConfiguration `json:"auth,omitempty"` - KeyList []IBMCloudKMSKeyEntryApplyConfiguration `json:"keyList,omitempty"` -} - -// IBMCloudKMSSpecApplyConfiguration constructs an declarative configuration of the IBMCloudKMSSpec type for use with -// apply. 
-func IBMCloudKMSSpec() *IBMCloudKMSSpecApplyConfiguration { - return &IBMCloudKMSSpecApplyConfiguration{} -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. -func (b *IBMCloudKMSSpecApplyConfiguration) WithRegion(value string) *IBMCloudKMSSpecApplyConfiguration { - b.Region = &value - return b -} - -// WithAuth sets the Auth field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Auth field is set to the value of the last call. -func (b *IBMCloudKMSSpecApplyConfiguration) WithAuth(value *IBMCloudKMSAuthSpecApplyConfiguration) *IBMCloudKMSSpecApplyConfiguration { - b.Auth = value - return b -} - -// WithKeyList adds the given value to the KeyList field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the KeyList field. -func (b *IBMCloudKMSSpecApplyConfiguration) WithKeyList(values ...*IBMCloudKMSKeyEntryApplyConfiguration) *IBMCloudKMSSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithKeyList") - } - b.KeyList = append(b.KeyList, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsunmanagedauthspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsunmanagedauthspec.go deleted file mode 100644 index d3e7dc2ad..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudkmsunmanagedauthspec.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// IBMCloudKMSUnmanagedAuthSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSUnmanagedAuthSpec type for use -// with apply. -type IBMCloudKMSUnmanagedAuthSpecApplyConfiguration struct { - Credentials *v1.LocalObjectReference `json:"credentials,omitempty"` -} - -// IBMCloudKMSUnmanagedAuthSpecApplyConfiguration constructs an declarative configuration of the IBMCloudKMSUnmanagedAuthSpec type for use with -// apply. -func IBMCloudKMSUnmanagedAuthSpec() *IBMCloudKMSUnmanagedAuthSpecApplyConfiguration { - return &IBMCloudKMSUnmanagedAuthSpecApplyConfiguration{} -} - -// WithCredentials sets the Credentials field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Credentials field is set to the value of the last call. -func (b *IBMCloudKMSUnmanagedAuthSpecApplyConfiguration) WithCredentials(value v1.LocalObjectReference) *IBMCloudKMSUnmanagedAuthSpecApplyConfiguration { - b.Credentials = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudplatformspec.go deleted file mode 100644 index 0a09f34c0..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/ibmcloudplatformspec.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "github.com/openshift/api/config/v1" -) - -// IBMCloudPlatformSpecApplyConfiguration represents an declarative configuration of the IBMCloudPlatformSpec type for use -// with apply. -type IBMCloudPlatformSpecApplyConfiguration struct { - ProviderType *v1.IBMCloudProviderType `json:"providerType,omitempty"` -} - -// IBMCloudPlatformSpecApplyConfiguration constructs an declarative configuration of the IBMCloudPlatformSpec type for use with -// apply. -func IBMCloudPlatformSpec() *IBMCloudPlatformSpecApplyConfiguration { - return &IBMCloudPlatformSpecApplyConfiguration{} -} - -// WithProviderType sets the ProviderType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ProviderType field is set to the value of the last call. -func (b *IBMCloudPlatformSpecApplyConfiguration) WithProviderType(value v1.IBMCloudProviderType) *IBMCloudPlatformSpecApplyConfiguration { - b.ProviderType = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/imagecontentsource.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/imagecontentsource.go deleted file mode 100644 index 868ca0418..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/imagecontentsource.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -// ImageContentSourceApplyConfiguration represents an declarative configuration of the ImageContentSource type for use -// with apply. -type ImageContentSourceApplyConfiguration struct { - Source *string `json:"source,omitempty"` - Mirrors []string `json:"mirrors,omitempty"` -} - -// ImageContentSourceApplyConfiguration constructs an declarative configuration of the ImageContentSource type for use with -// apply. -func ImageContentSource() *ImageContentSourceApplyConfiguration { - return &ImageContentSourceApplyConfiguration{} -} - -// WithSource sets the Source field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Source field is set to the value of the last call. -func (b *ImageContentSourceApplyConfiguration) WithSource(value string) *ImageContentSourceApplyConfiguration { - b.Source = &value - return b -} - -// WithMirrors adds the given value to the Mirrors field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Mirrors field. -func (b *ImageContentSourceApplyConfiguration) WithMirrors(values ...string) *ImageContentSourceApplyConfiguration { - for i := range values { - b.Mirrors = append(b.Mirrors, values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/inplaceupgrade.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/inplaceupgrade.go deleted file mode 100644 index 4d43c87af..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/inplaceupgrade.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// InPlaceUpgradeApplyConfiguration represents an declarative configuration of the InPlaceUpgrade type for use -// with apply. -type InPlaceUpgradeApplyConfiguration struct { - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` -} - -// InPlaceUpgradeApplyConfiguration constructs an declarative configuration of the InPlaceUpgrade type for use with -// apply. -func InPlaceUpgrade() *InPlaceUpgradeApplyConfiguration { - return &InPlaceUpgradeApplyConfiguration{} -} - -// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MaxUnavailable field is set to the value of the last call. 
-func (b *InPlaceUpgradeApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *InPlaceUpgradeApplyConfiguration { - b.MaxUnavailable = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kmsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kmsspec.go deleted file mode 100644 index 8181aeaf2..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kmsspec.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KMSSpecApplyConfiguration represents an declarative configuration of the KMSSpec type for use -// with apply. -type KMSSpecApplyConfiguration struct { - Provider *v1alpha1.KMSProvider `json:"provider,omitempty"` - IBMCloud *IBMCloudKMSSpecApplyConfiguration `json:"ibmcloud,omitempty"` - AWS *AWSKMSSpecApplyConfiguration `json:"aws,omitempty"` - Azure *AzureKMSSpecApplyConfiguration `json:"azure,omitempty"` -} - -// KMSSpecApplyConfiguration constructs an declarative configuration of the KMSSpec type for use with -// apply. -func KMSSpec() *KMSSpecApplyConfiguration { - return &KMSSpecApplyConfiguration{} -} - -// WithProvider sets the Provider field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Provider field is set to the value of the last call. -func (b *KMSSpecApplyConfiguration) WithProvider(value v1alpha1.KMSProvider) *KMSSpecApplyConfiguration { - b.Provider = &value - return b -} - -// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IBMCloud field is set to the value of the last call. -func (b *KMSSpecApplyConfiguration) WithIBMCloud(value *IBMCloudKMSSpecApplyConfiguration) *KMSSpecApplyConfiguration { - b.IBMCloud = value - return b -} - -// WithAWS sets the AWS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWS field is set to the value of the last call. -func (b *KMSSpecApplyConfiguration) WithAWS(value *AWSKMSSpecApplyConfiguration) *KMSSpecApplyConfiguration { - b.AWS = value - return b -} - -// WithAzure sets the Azure field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Azure field is set to the value of the last call. 
-func (b *KMSSpecApplyConfiguration) WithAzure(value *AzureKMSSpecApplyConfiguration) *KMSSpecApplyConfiguration { - b.Azure = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcachingstrategy.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcachingstrategy.go deleted file mode 100644 index f2b669503..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcachingstrategy.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KubevirtCachingStrategyApplyConfiguration represents an declarative configuration of the KubevirtCachingStrategy type for use -// with apply. -type KubevirtCachingStrategyApplyConfiguration struct { - Type *v1alpha1.KubevirtCachingStrategyType `json:"type,omitempty"` -} - -// KubevirtCachingStrategyApplyConfiguration constructs an declarative configuration of the KubevirtCachingStrategy type for use with -// apply. -func KubevirtCachingStrategy() *KubevirtCachingStrategyApplyConfiguration { - return &KubevirtCachingStrategyApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *KubevirtCachingStrategyApplyConfiguration) WithType(value v1alpha1.KubevirtCachingStrategyType) *KubevirtCachingStrategyApplyConfiguration { - b.Type = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcompute.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcompute.go deleted file mode 100644 index fdb4835b8..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtcompute.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// KubevirtComputeApplyConfiguration represents an declarative configuration of the KubevirtCompute type for use -// with apply. -type KubevirtComputeApplyConfiguration struct { - Memory *resource.Quantity `json:"memory,omitempty"` - Cores *uint32 `json:"cores,omitempty"` - QosClass *v1alpha1.QoSClass `json:"qosClass,omitempty"` -} - -// KubevirtComputeApplyConfiguration constructs an declarative configuration of the KubevirtCompute type for use with -// apply. -func KubevirtCompute() *KubevirtComputeApplyConfiguration { - return &KubevirtComputeApplyConfiguration{} -} - -// WithMemory sets the Memory field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Memory field is set to the value of the last call. -func (b *KubevirtComputeApplyConfiguration) WithMemory(value resource.Quantity) *KubevirtComputeApplyConfiguration { - b.Memory = &value - return b -} - -// WithCores sets the Cores field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Cores field is set to the value of the last call. -func (b *KubevirtComputeApplyConfiguration) WithCores(value uint32) *KubevirtComputeApplyConfiguration { - b.Cores = &value - return b -} - -// WithQosClass sets the QosClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the QosClass field is set to the value of the last call. -func (b *KubevirtComputeApplyConfiguration) WithQosClass(value v1alpha1.QoSClass) *KubevirtComputeApplyConfiguration { - b.QosClass = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtdiskimage.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtdiskimage.go deleted file mode 100644 index fdc6dde82..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtdiskimage.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtDiskImageApplyConfiguration represents an declarative configuration of the KubevirtDiskImage type for use -// with apply. -type KubevirtDiskImageApplyConfiguration struct { - ContainerDiskImage *string `json:"containerDiskImage,omitempty"` -} - -// KubevirtDiskImageApplyConfiguration constructs an declarative configuration of the KubevirtDiskImage type for use with -// apply. 
-func KubevirtDiskImage() *KubevirtDiskImageApplyConfiguration { - return &KubevirtDiskImageApplyConfiguration{} -} - -// WithContainerDiskImage sets the ContainerDiskImage field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ContainerDiskImage field is set to the value of the last call. -func (b *KubevirtDiskImageApplyConfiguration) WithContainerDiskImage(value string) *KubevirtDiskImageApplyConfiguration { - b.ContainerDiskImage = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtmanualstoragedriverconfig.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtmanualstoragedriverconfig.go deleted file mode 100644 index d6bf559dc..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtmanualstoragedriverconfig.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtManualStorageDriverConfigApplyConfiguration represents an declarative configuration of the KubevirtManualStorageDriverConfig type for use -// with apply. -type KubevirtManualStorageDriverConfigApplyConfiguration struct { - StorageClassMapping []KubevirtStorageClassMappingApplyConfiguration `json:"storageClassMapping,omitempty"` - VolumeSnapshotClassMapping []KubevirtVolumeSnapshotClassMappingApplyConfiguration `json:"volumeSnapshotClassMapping,omitempty"` -} - -// KubevirtManualStorageDriverConfigApplyConfiguration constructs an declarative configuration of the KubevirtManualStorageDriverConfig type for use with -// apply. -func KubevirtManualStorageDriverConfig() *KubevirtManualStorageDriverConfigApplyConfiguration { - return &KubevirtManualStorageDriverConfigApplyConfiguration{} -} - -// WithStorageClassMapping adds the given value to the StorageClassMapping field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the StorageClassMapping field. -func (b *KubevirtManualStorageDriverConfigApplyConfiguration) WithStorageClassMapping(values ...*KubevirtStorageClassMappingApplyConfiguration) *KubevirtManualStorageDriverConfigApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithStorageClassMapping") - } - b.StorageClassMapping = append(b.StorageClassMapping, *values[i]) - } - return b -} - -// WithVolumeSnapshotClassMapping adds the given value to the VolumeSnapshotClassMapping field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the VolumeSnapshotClassMapping field. -func (b *KubevirtManualStorageDriverConfigApplyConfiguration) WithVolumeSnapshotClassMapping(values ...*KubevirtVolumeSnapshotClassMappingApplyConfiguration) *KubevirtManualStorageDriverConfigApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithVolumeSnapshotClassMapping") - } - b.VolumeSnapshotClassMapping = append(b.VolumeSnapshotClassMapping, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnetwork.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnetwork.go deleted file mode 100644 index 692595255..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnetwork.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtNetworkApplyConfiguration represents an declarative configuration of the KubevirtNetwork type for use -// with apply. -type KubevirtNetworkApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// KubevirtNetworkApplyConfiguration constructs an declarative configuration of the KubevirtNetwork type for use with -// apply. -func KubevirtNetwork() *KubevirtNetworkApplyConfiguration { - return &KubevirtNetworkApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *KubevirtNetworkApplyConfiguration) WithName(value string) *KubevirtNetworkApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go deleted file mode 100644 index f5ae52ccf..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KubevirtNodePoolPlatformApplyConfiguration represents an declarative configuration of the KubevirtNodePoolPlatform type for use -// with apply. -type KubevirtNodePoolPlatformApplyConfiguration struct { - RootVolume *KubevirtRootVolumeApplyConfiguration `json:"rootVolume,omitempty"` - Compute *KubevirtComputeApplyConfiguration `json:"compute,omitempty"` - NetworkInterfaceMultiQueue *hypershiftv1alpha1.MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` - AdditionalNetworks []KubevirtNetworkApplyConfiguration `json:"additionalNetworks,omitempty"` - AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` -} - -// KubevirtNodePoolPlatformApplyConfiguration constructs an declarative configuration of the KubevirtNodePoolPlatform type for use with -// apply. -func KubevirtNodePoolPlatform() *KubevirtNodePoolPlatformApplyConfiguration { - return &KubevirtNodePoolPlatformApplyConfiguration{} -} - -// WithRootVolume sets the RootVolume field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RootVolume field is set to the value of the last call. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithRootVolume(value *KubevirtRootVolumeApplyConfiguration) *KubevirtNodePoolPlatformApplyConfiguration { - b.RootVolume = value - return b -} - -// WithCompute sets the Compute field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Compute field is set to the value of the last call. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithCompute(value *KubevirtComputeApplyConfiguration) *KubevirtNodePoolPlatformApplyConfiguration { - b.Compute = value - return b -} - -// WithNetworkInterfaceMultiQueue sets the NetworkInterfaceMultiQueue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NetworkInterfaceMultiQueue field is set to the value of the last call. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithNetworkInterfaceMultiQueue(value hypershiftv1alpha1.MultiQueueSetting) *KubevirtNodePoolPlatformApplyConfiguration { - b.NetworkInterfaceMultiQueue = &value - return b -} - -// WithAdditionalNetworks adds the given value to the AdditionalNetworks field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AdditionalNetworks field. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithAdditionalNetworks(values ...*KubevirtNetworkApplyConfiguration) *KubevirtNodePoolPlatformApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAdditionalNetworks") - } - b.AdditionalNetworks = append(b.AdditionalNetworks, *values[i]) - } - return b -} - -// WithAttachDefaultNetwork sets the AttachDefaultNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the AttachDefaultNetwork field is set to the value of the last call. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithAttachDefaultNetwork(value bool) *KubevirtNodePoolPlatformApplyConfiguration { - b.AttachDefaultNetwork = &value - return b -} - -// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the NodeSelector field, -// overwriting an existing map entries in NodeSelector field with the same key. -func (b *KubevirtNodePoolPlatformApplyConfiguration) WithNodeSelector(entries map[string]string) *KubevirtNodePoolPlatformApplyConfiguration { - if b.NodeSelector == nil && len(entries) > 0 { - b.NodeSelector = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.NodeSelector[k] = v - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolstatus.go deleted file mode 100644 index 50bceb69b..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolstatus.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubeVirtNodePoolStatusApplyConfiguration represents an declarative configuration of the KubeVirtNodePoolStatus type for use -// with apply. -type KubeVirtNodePoolStatusApplyConfiguration struct { - CacheName *string `json:"cacheName,omitempty"` - Credentials *KubevirtPlatformCredentialsApplyConfiguration `json:"credentials,omitempty"` -} - -// KubeVirtNodePoolStatusApplyConfiguration constructs an declarative configuration of the KubeVirtNodePoolStatus type for use with -// apply. -func KubeVirtNodePoolStatus() *KubeVirtNodePoolStatusApplyConfiguration { - return &KubeVirtNodePoolStatusApplyConfiguration{} -} - -// WithCacheName sets the CacheName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CacheName field is set to the value of the last call. -func (b *KubeVirtNodePoolStatusApplyConfiguration) WithCacheName(value string) *KubeVirtNodePoolStatusApplyConfiguration { - b.CacheName = &value - return b -} - -// WithCredentials sets the Credentials field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Credentials field is set to the value of the last call. 
-func (b *KubeVirtNodePoolStatusApplyConfiguration) WithCredentials(value *KubevirtPlatformCredentialsApplyConfiguration) *KubeVirtNodePoolStatusApplyConfiguration { - b.Credentials = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtpersistentvolume.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtpersistentvolume.go deleted file mode 100644 index 67b2cbe35..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtpersistentvolume.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// KubevirtPersistentVolumeApplyConfiguration represents an declarative configuration of the KubevirtPersistentVolume type for use -// with apply. -type KubevirtPersistentVolumeApplyConfiguration struct { - Size *resource.Quantity `json:"size,omitempty"` - StorageClass *string `json:"storageClass,omitempty"` - AccessModes []v1alpha1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` -} - -// KubevirtPersistentVolumeApplyConfiguration constructs an declarative configuration of the KubevirtPersistentVolume type for use with -// apply. -func KubevirtPersistentVolume() *KubevirtPersistentVolumeApplyConfiguration { - return &KubevirtPersistentVolumeApplyConfiguration{} -} - -// WithSize sets the Size field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Size field is set to the value of the last call. -func (b *KubevirtPersistentVolumeApplyConfiguration) WithSize(value resource.Quantity) *KubevirtPersistentVolumeApplyConfiguration { - b.Size = &value - return b -} - -// WithStorageClass sets the StorageClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageClass field is set to the value of the last call. -func (b *KubevirtPersistentVolumeApplyConfiguration) WithStorageClass(value string) *KubevirtPersistentVolumeApplyConfiguration { - b.StorageClass = &value - return b -} - -// WithAccessModes adds the given value to the AccessModes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AccessModes field. 
-func (b *KubevirtPersistentVolumeApplyConfiguration) WithAccessModes(values ...v1alpha1.PersistentVolumeAccessMode) *KubevirtPersistentVolumeApplyConfiguration { - for i := range values { - b.AccessModes = append(b.AccessModes, values[i]) - } - return b -} - -// WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VolumeMode field is set to the value of the last call. -func (b *KubevirtPersistentVolumeApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *KubevirtPersistentVolumeApplyConfiguration { - b.VolumeMode = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformcredentials.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformcredentials.go deleted file mode 100644 index 3e792ad79..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformcredentials.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtPlatformCredentialsApplyConfiguration represents an declarative configuration of the KubevirtPlatformCredentials type for use -// with apply. -type KubevirtPlatformCredentialsApplyConfiguration struct { - InfraKubeConfigSecret *KubeconfigSecretRefApplyConfiguration `json:"infraKubeConfigSecret,omitempty"` - InfraNamespace *string `json:"infraNamespace,omitempty"` -} - -// KubevirtPlatformCredentialsApplyConfiguration constructs an declarative configuration of the KubevirtPlatformCredentials type for use with -// apply. -func KubevirtPlatformCredentials() *KubevirtPlatformCredentialsApplyConfiguration { - return &KubevirtPlatformCredentialsApplyConfiguration{} -} - -// WithInfraKubeConfigSecret sets the InfraKubeConfigSecret field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfraKubeConfigSecret field is set to the value of the last call. -func (b *KubevirtPlatformCredentialsApplyConfiguration) WithInfraKubeConfigSecret(value *KubeconfigSecretRefApplyConfiguration) *KubevirtPlatformCredentialsApplyConfiguration { - b.InfraKubeConfigSecret = value - return b -} - -// WithInfraNamespace sets the InfraNamespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfraNamespace field is set to the value of the last call. 
-func (b *KubevirtPlatformCredentialsApplyConfiguration) WithInfraNamespace(value string) *KubevirtPlatformCredentialsApplyConfiguration { - b.InfraNamespace = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformspec.go deleted file mode 100644 index 51ed8c151..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtplatformspec.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtPlatformSpecApplyConfiguration represents an declarative configuration of the KubevirtPlatformSpec type for use -// with apply. -type KubevirtPlatformSpecApplyConfiguration struct { - BaseDomainPassthrough *bool `json:"baseDomainPassthrough,omitempty"` - GenerateID *string `json:"generateID,omitempty"` - Credentials *KubevirtPlatformCredentialsApplyConfiguration `json:"credentials,omitempty"` - StorageDriver *KubevirtStorageDriverSpecApplyConfiguration `json:"storageDriver,omitempty"` -} - -// KubevirtPlatformSpecApplyConfiguration constructs an declarative configuration of the KubevirtPlatformSpec type for use with -// apply. -func KubevirtPlatformSpec() *KubevirtPlatformSpecApplyConfiguration { - return &KubevirtPlatformSpecApplyConfiguration{} -} - -// WithBaseDomainPassthrough sets the BaseDomainPassthrough field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BaseDomainPassthrough field is set to the value of the last call. -func (b *KubevirtPlatformSpecApplyConfiguration) WithBaseDomainPassthrough(value bool) *KubevirtPlatformSpecApplyConfiguration { - b.BaseDomainPassthrough = &value - return b -} - -// WithGenerateID sets the GenerateID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateID field is set to the value of the last call. -func (b *KubevirtPlatformSpecApplyConfiguration) WithGenerateID(value string) *KubevirtPlatformSpecApplyConfiguration { - b.GenerateID = &value - return b -} - -// WithCredentials sets the Credentials field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Credentials field is set to the value of the last call. 
-func (b *KubevirtPlatformSpecApplyConfiguration) WithCredentials(value *KubevirtPlatformCredentialsApplyConfiguration) *KubevirtPlatformSpecApplyConfiguration { - b.Credentials = value - return b -} - -// WithStorageDriver sets the StorageDriver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageDriver field is set to the value of the last call. -func (b *KubevirtPlatformSpecApplyConfiguration) WithStorageDriver(value *KubevirtStorageDriverSpecApplyConfiguration) *KubevirtPlatformSpecApplyConfiguration { - b.StorageDriver = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtrootvolume.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtrootvolume.go deleted file mode 100644 index 56f4487d9..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtrootvolume.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KubevirtRootVolumeApplyConfiguration represents an declarative configuration of the KubevirtRootVolume type for use -// with apply. -type KubevirtRootVolumeApplyConfiguration struct { - Image *KubevirtDiskImageApplyConfiguration `json:"diskImage,omitempty"` - KubevirtVolumeApplyConfiguration `json:",inline"` - CacheStrategy *KubevirtCachingStrategyApplyConfiguration `json:"cacheStrategy,omitempty"` -} - -// KubevirtRootVolumeApplyConfiguration constructs an declarative configuration of the KubevirtRootVolume type for use with -// apply. -func KubevirtRootVolume() *KubevirtRootVolumeApplyConfiguration { - return &KubevirtRootVolumeApplyConfiguration{} -} - -// WithImage sets the Image field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Image field is set to the value of the last call. -func (b *KubevirtRootVolumeApplyConfiguration) WithImage(value *KubevirtDiskImageApplyConfiguration) *KubevirtRootVolumeApplyConfiguration { - b.Image = value - return b -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. 
-func (b *KubevirtRootVolumeApplyConfiguration) WithType(value hypershiftv1alpha1.KubevirtVolumeType) *KubevirtRootVolumeApplyConfiguration { - b.Type = &value - return b -} - -// WithPersistent sets the Persistent field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Persistent field is set to the value of the last call. -func (b *KubevirtRootVolumeApplyConfiguration) WithPersistent(value *KubevirtPersistentVolumeApplyConfiguration) *KubevirtRootVolumeApplyConfiguration { - b.Persistent = value - return b -} - -// WithCacheStrategy sets the CacheStrategy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CacheStrategy field is set to the value of the last call. -func (b *KubevirtRootVolumeApplyConfiguration) WithCacheStrategy(value *KubevirtCachingStrategyApplyConfiguration) *KubevirtRootVolumeApplyConfiguration { - b.CacheStrategy = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstorageclassmapping.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstorageclassmapping.go deleted file mode 100644 index 97177c49c..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstorageclassmapping.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtStorageClassMappingApplyConfiguration represents an declarative configuration of the KubevirtStorageClassMapping type for use -// with apply. -type KubevirtStorageClassMappingApplyConfiguration struct { - Group *string `json:"group,omitempty"` - InfraStorageClassName *string `json:"infraStorageClassName,omitempty"` - GuestStorageClassName *string `json:"guestStorageClassName,omitempty"` -} - -// KubevirtStorageClassMappingApplyConfiguration constructs an declarative configuration of the KubevirtStorageClassMapping type for use with -// apply. -func KubevirtStorageClassMapping() *KubevirtStorageClassMappingApplyConfiguration { - return &KubevirtStorageClassMappingApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. 
-func (b *KubevirtStorageClassMappingApplyConfiguration) WithGroup(value string) *KubevirtStorageClassMappingApplyConfiguration { - b.Group = &value - return b -} - -// WithInfraStorageClassName sets the InfraStorageClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfraStorageClassName field is set to the value of the last call. -func (b *KubevirtStorageClassMappingApplyConfiguration) WithInfraStorageClassName(value string) *KubevirtStorageClassMappingApplyConfiguration { - b.InfraStorageClassName = &value - return b -} - -// WithGuestStorageClassName sets the GuestStorageClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GuestStorageClassName field is set to the value of the last call. -func (b *KubevirtStorageClassMappingApplyConfiguration) WithGuestStorageClassName(value string) *KubevirtStorageClassMappingApplyConfiguration { - b.GuestStorageClassName = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstoragedriverspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstoragedriverspec.go deleted file mode 100644 index f41c65f8b..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtstoragedriverspec.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KubevirtStorageDriverSpecApplyConfiguration represents an declarative configuration of the KubevirtStorageDriverSpec type for use -// with apply. -type KubevirtStorageDriverSpecApplyConfiguration struct { - Type *v1alpha1.KubevirtStorageDriverConfigType `json:"type,omitempty"` - Manual *KubevirtManualStorageDriverConfigApplyConfiguration `json:"manual,omitempty"` -} - -// KubevirtStorageDriverSpecApplyConfiguration constructs an declarative configuration of the KubevirtStorageDriverSpec type for use with -// apply. -func KubevirtStorageDriverSpec() *KubevirtStorageDriverSpecApplyConfiguration { - return &KubevirtStorageDriverSpecApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. 
-func (b *KubevirtStorageDriverSpecApplyConfiguration) WithType(value v1alpha1.KubevirtStorageDriverConfigType) *KubevirtStorageDriverSpecApplyConfiguration { - b.Type = &value - return b -} - -// WithManual sets the Manual field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Manual field is set to the value of the last call. -func (b *KubevirtStorageDriverSpecApplyConfiguration) WithManual(value *KubevirtManualStorageDriverConfigApplyConfiguration) *KubevirtStorageDriverSpecApplyConfiguration { - b.Manual = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolume.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolume.go deleted file mode 100644 index 0d637b967..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolume.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// KubevirtVolumeApplyConfiguration represents an declarative configuration of the KubevirtVolume type for use -// with apply. -type KubevirtVolumeApplyConfiguration struct { - Type *v1alpha1.KubevirtVolumeType `json:"type,omitempty"` - Persistent *KubevirtPersistentVolumeApplyConfiguration `json:"persistent,omitempty"` -} - -// KubevirtVolumeApplyConfiguration constructs an declarative configuration of the KubevirtVolume type for use with -// apply. -func KubevirtVolume() *KubevirtVolumeApplyConfiguration { - return &KubevirtVolumeApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *KubevirtVolumeApplyConfiguration) WithType(value v1alpha1.KubevirtVolumeType) *KubevirtVolumeApplyConfiguration { - b.Type = &value - return b -} - -// WithPersistent sets the Persistent field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Persistent field is set to the value of the last call. 
-func (b *KubevirtVolumeApplyConfiguration) WithPersistent(value *KubevirtPersistentVolumeApplyConfiguration) *KubevirtVolumeApplyConfiguration { - b.Persistent = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolumesnapshotclassmapping.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolumesnapshotclassmapping.go deleted file mode 100644 index be1049f90..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubevirtvolumesnapshotclassmapping.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// KubevirtVolumeSnapshotClassMappingApplyConfiguration represents an declarative configuration of the KubevirtVolumeSnapshotClassMapping type for use -// with apply. -type KubevirtVolumeSnapshotClassMappingApplyConfiguration struct { - Group *string `json:"group,omitempty"` - InfraVolumeSnapshotClassName *string `json:"infraVolumeSnapshotClassName,omitempty"` - GuestVolumeSnapshotClassName *string `json:"guestVolumeSnapshotClassName,omitempty"` -} - -// KubevirtVolumeSnapshotClassMappingApplyConfiguration constructs an declarative configuration of the KubevirtVolumeSnapshotClassMapping type for use with -// apply. -func KubevirtVolumeSnapshotClassMapping() *KubevirtVolumeSnapshotClassMappingApplyConfiguration { - return &KubevirtVolumeSnapshotClassMappingApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *KubevirtVolumeSnapshotClassMappingApplyConfiguration) WithGroup(value string) *KubevirtVolumeSnapshotClassMappingApplyConfiguration { - b.Group = &value - return b -} - -// WithInfraVolumeSnapshotClassName sets the InfraVolumeSnapshotClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InfraVolumeSnapshotClassName field is set to the value of the last call. -func (b *KubevirtVolumeSnapshotClassMappingApplyConfiguration) WithInfraVolumeSnapshotClassName(value string) *KubevirtVolumeSnapshotClassMappingApplyConfiguration { - b.InfraVolumeSnapshotClassName = &value - return b -} - -// WithGuestVolumeSnapshotClassName sets the GuestVolumeSnapshotClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GuestVolumeSnapshotClassName field is set to the value of the last call. 
-func (b *KubevirtVolumeSnapshotClassMappingApplyConfiguration) WithGuestVolumeSnapshotClassName(value string) *KubevirtVolumeSnapshotClassMappingApplyConfiguration { - b.GuestVolumeSnapshotClassName = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/loadbalancerpublishingstrategy.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/loadbalancerpublishingstrategy.go deleted file mode 100644 index 3d8dcba82..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/loadbalancerpublishingstrategy.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// LoadBalancerPublishingStrategyApplyConfiguration represents an declarative configuration of the LoadBalancerPublishingStrategy type for use -// with apply. -type LoadBalancerPublishingStrategyApplyConfiguration struct { - Hostname *string `json:"hostname,omitempty"` -} - -// LoadBalancerPublishingStrategyApplyConfiguration constructs an declarative configuration of the LoadBalancerPublishingStrategy type for use with -// apply. -func LoadBalancerPublishingStrategy() *LoadBalancerPublishingStrategyApplyConfiguration { - return &LoadBalancerPublishingStrategyApplyConfiguration{} -} - -// WithHostname sets the Hostname field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Hostname field is set to the value of the last call. -func (b *LoadBalancerPublishingStrategyApplyConfiguration) WithHostname(value string) *LoadBalancerPublishingStrategyApplyConfiguration { - b.Hostname = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/machinenetworkentry.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/machinenetworkentry.go deleted file mode 100644 index eacaadb4b..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/machinenetworkentry.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - ipnet "github.com/openshift/hypershift/api/util/ipnet" -) - -// MachineNetworkEntryApplyConfiguration represents an declarative configuration of the MachineNetworkEntry type for use -// with apply. -type MachineNetworkEntryApplyConfiguration struct { - CIDR *ipnet.IPNet `json:"cidr,omitempty"` -} - -// MachineNetworkEntryApplyConfiguration constructs an declarative configuration of the MachineNetworkEntry type for use with -// apply. -func MachineNetworkEntry() *MachineNetworkEntryApplyConfiguration { - return &MachineNetworkEntryApplyConfiguration{} -} - -// WithCIDR sets the CIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CIDR field is set to the value of the last call. -func (b *MachineNetworkEntryApplyConfiguration) WithCIDR(value ipnet.IPNet) *MachineNetworkEntryApplyConfiguration { - b.CIDR = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdstoragespec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdstoragespec.go deleted file mode 100644 index 3499d67a8..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdstoragespec.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// ManagedEtcdStorageSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdStorageSpec type for use -// with apply. -type ManagedEtcdStorageSpecApplyConfiguration struct { - Type *v1alpha1.ManagedEtcdStorageType `json:"type,omitempty"` - PersistentVolume *PersistentVolumeEtcdStorageSpecApplyConfiguration `json:"persistentVolume,omitempty"` - RestoreSnapshotURL []string `json:"restoreSnapshotURL,omitempty"` -} - -// ManagedEtcdStorageSpecApplyConfiguration constructs an declarative configuration of the ManagedEtcdStorageSpec type for use with -// apply. -func ManagedEtcdStorageSpec() *ManagedEtcdStorageSpecApplyConfiguration { - return &ManagedEtcdStorageSpecApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *ManagedEtcdStorageSpecApplyConfiguration) WithType(value v1alpha1.ManagedEtcdStorageType) *ManagedEtcdStorageSpecApplyConfiguration { - b.Type = &value - return b -} - -// WithPersistentVolume sets the PersistentVolume field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the PersistentVolume field is set to the value of the last call. -func (b *ManagedEtcdStorageSpecApplyConfiguration) WithPersistentVolume(value *PersistentVolumeEtcdStorageSpecApplyConfiguration) *ManagedEtcdStorageSpecApplyConfiguration { - b.PersistentVolume = value - return b -} - -// WithRestoreSnapshotURL adds the given value to the RestoreSnapshotURL field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RestoreSnapshotURL field. -func (b *ManagedEtcdStorageSpecApplyConfiguration) WithRestoreSnapshotURL(values ...string) *ManagedEtcdStorageSpecApplyConfiguration { - for i := range values { - b.RestoreSnapshotURL = append(b.RestoreSnapshotURL, values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepool.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepool.go deleted file mode 100644 index 66fc3c196..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepool.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// NodePoolApplyConfiguration represents an declarative configuration of the NodePool type for use -// with apply. -type NodePoolApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NodePoolSpecApplyConfiguration `json:"spec,omitempty"` - Status *NodePoolStatusApplyConfiguration `json:"status,omitempty"` -} - -// NodePool constructs an declarative configuration of the NodePool type for use with -// apply. -func NodePool(name, namespace string) *NodePoolApplyConfiguration { - b := &NodePoolApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("NodePool") - b.WithAPIVersion("hypershift.openshift.io/v1alpha1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithKind(value string) *NodePoolApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *NodePoolApplyConfiguration) WithAPIVersion(value string) *NodePoolApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithName(value string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithGenerateName(value string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithNamespace(value string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithUID(value types.UID) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithResourceVersion(value string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithGeneration(value int64) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *NodePoolApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *NodePoolApplyConfiguration) WithLabels(entries map[string]string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *NodePoolApplyConfiguration) WithAnnotations(entries map[string]string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NodePoolApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *NodePoolApplyConfiguration) WithFinalizers(values ...string) *NodePoolApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *NodePoolApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithSpec(value *NodePoolSpecApplyConfiguration) *NodePoolApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *NodePoolApplyConfiguration) WithStatus(value *NodePoolStatusApplyConfiguration) *NodePoolApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolautoscaling.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolautoscaling.go deleted file mode 100644 index d0ceb375c..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolautoscaling.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// NodePoolAutoScalingApplyConfiguration represents an declarative configuration of the NodePoolAutoScaling type for use -// with apply. -type NodePoolAutoScalingApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// NodePoolAutoScalingApplyConfiguration constructs an declarative configuration of the NodePoolAutoScaling type for use with -// apply. -func NodePoolAutoScaling() *NodePoolAutoScalingApplyConfiguration { - return &NodePoolAutoScalingApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. 
-func (b *NodePoolAutoScalingApplyConfiguration) WithMin(value int32) *NodePoolAutoScalingApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *NodePoolAutoScalingApplyConfiguration) WithMax(value int32) *NodePoolAutoScalingApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolcondition.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolcondition.go deleted file mode 100644 index 276cadf87..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolcondition.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NodePoolConditionApplyConfiguration represents an declarative configuration of the NodePoolCondition type for use -// with apply. -type NodePoolConditionApplyConfiguration struct { - Type *string `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - Severity *string `json:"severity,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` -} - -// NodePoolConditionApplyConfiguration constructs an declarative configuration of the NodePoolCondition type for use with -// apply. -func NodePoolCondition() *NodePoolConditionApplyConfiguration { - return &NodePoolConditionApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithType(value string) *NodePoolConditionApplyConfiguration { - b.Type = &value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NodePoolConditionApplyConfiguration { - b.Status = &value - return b -} - -// WithSeverity sets the Severity field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Severity field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithSeverity(value string) *NodePoolConditionApplyConfiguration { - b.Severity = &value - return b -} - -// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *NodePoolConditionApplyConfiguration { - b.LastTransitionTime = &value - return b -} - -// WithReason sets the Reason field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Reason field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithReason(value string) *NodePoolConditionApplyConfiguration { - b.Reason = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithMessage(value string) *NodePoolConditionApplyConfiguration { - b.Message = &value - return b -} - -// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ObservedGeneration field is set to the value of the last call. -func (b *NodePoolConditionApplyConfiguration) WithObservedGeneration(value int64) *NodePoolConditionApplyConfiguration { - b.ObservedGeneration = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolmanagement.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolmanagement.go deleted file mode 100644 index 6b3a4d827..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolmanagement.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// NodePoolManagementApplyConfiguration represents an declarative configuration of the NodePoolManagement type for use -// with apply. 
-type NodePoolManagementApplyConfiguration struct { - UpgradeType *v1alpha1.UpgradeType `json:"upgradeType,omitempty"` - Replace *ReplaceUpgradeApplyConfiguration `json:"replace,omitempty"` - InPlace *InPlaceUpgradeApplyConfiguration `json:"inPlace,omitempty"` - AutoRepair *bool `json:"autoRepair,omitempty"` -} - -// NodePoolManagementApplyConfiguration constructs an declarative configuration of the NodePoolManagement type for use with -// apply. -func NodePoolManagement() *NodePoolManagementApplyConfiguration { - return &NodePoolManagementApplyConfiguration{} -} - -// WithUpgradeType sets the UpgradeType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UpgradeType field is set to the value of the last call. -func (b *NodePoolManagementApplyConfiguration) WithUpgradeType(value v1alpha1.UpgradeType) *NodePoolManagementApplyConfiguration { - b.UpgradeType = &value - return b -} - -// WithReplace sets the Replace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Replace field is set to the value of the last call. -func (b *NodePoolManagementApplyConfiguration) WithReplace(value *ReplaceUpgradeApplyConfiguration) *NodePoolManagementApplyConfiguration { - b.Replace = value - return b -} - -// WithInPlace sets the InPlace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InPlace field is set to the value of the last call. -func (b *NodePoolManagementApplyConfiguration) WithInPlace(value *InPlaceUpgradeApplyConfiguration) *NodePoolManagementApplyConfiguration { - b.InPlace = value - return b -} - -// WithAutoRepair sets the AutoRepair field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AutoRepair field is set to the value of the last call. -func (b *NodePoolManagementApplyConfiguration) WithAutoRepair(value bool) *NodePoolManagementApplyConfiguration { - b.AutoRepair = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatform.go deleted file mode 100644 index 295890b6d..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatform.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// NodePoolPlatformApplyConfiguration represents an declarative configuration of the NodePoolPlatform type for use -// with apply. -type NodePoolPlatformApplyConfiguration struct { - Type *v1alpha1.PlatformType `json:"type,omitempty"` - AWS *AWSNodePoolPlatformApplyConfiguration `json:"aws,omitempty"` - IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` - Kubevirt *KubevirtNodePoolPlatformApplyConfiguration `json:"kubevirt,omitempty"` - Agent *AgentNodePoolPlatformApplyConfiguration `json:"agent,omitempty"` - Azure *AzureNodePoolPlatformApplyConfiguration `json:"azure,omitempty"` - PowerVS *PowerVSNodePoolPlatformApplyConfiguration `json:"powervs,omitempty"` -} - -// NodePoolPlatformApplyConfiguration constructs an declarative configuration of the NodePoolPlatform type for use with -// apply. -func NodePoolPlatform() *NodePoolPlatformApplyConfiguration { - return &NodePoolPlatformApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithType(value v1alpha1.PlatformType) *NodePoolPlatformApplyConfiguration { - b.Type = &value - return b -} - -// WithAWS sets the AWS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWS field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithAWS(value *AWSNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.AWS = value - return b -} - -// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IBMCloud field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithIBMCloud(value *IBMCloudPlatformSpecApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.IBMCloud = value - return b -} - -// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kubevirt field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithKubevirt(value *KubevirtNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.Kubevirt = value - return b -} - -// WithAgent sets the Agent field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Agent field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithAgent(value *AgentNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.Agent = value - return b -} - -// WithAzure sets the Azure field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Azure field is set to the value of the last call. 
-func (b *NodePoolPlatformApplyConfiguration) WithAzure(value *AzureNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.Azure = value - return b -} - -// WithPowerVS sets the PowerVS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PowerVS field is set to the value of the last call. -func (b *NodePoolPlatformApplyConfiguration) WithPowerVS(value *PowerVSNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { - b.PowerVS = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatformstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatformstatus.go deleted file mode 100644 index 1dc1d2e94..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolplatformstatus.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// NodePoolPlatformStatusApplyConfiguration represents an declarative configuration of the NodePoolPlatformStatus type for use -// with apply. -type NodePoolPlatformStatusApplyConfiguration struct { - KubeVirt *KubeVirtNodePoolStatusApplyConfiguration `json:"kubeVirt,omitempty"` -} - -// NodePoolPlatformStatusApplyConfiguration constructs an declarative configuration of the NodePoolPlatformStatus type for use with -// apply. -func NodePoolPlatformStatus() *NodePoolPlatformStatusApplyConfiguration { - return &NodePoolPlatformStatusApplyConfiguration{} -} - -// WithKubeVirt sets the KubeVirt field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeVirt field is set to the value of the last call. -func (b *NodePoolPlatformStatusApplyConfiguration) WithKubeVirt(value *KubeVirtNodePoolStatusApplyConfiguration) *NodePoolPlatformStatusApplyConfiguration { - b.KubeVirt = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolspec.go deleted file mode 100644 index df5f52175..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolspec.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NodePoolSpecApplyConfiguration represents an declarative configuration of the NodePoolSpec type for use -// with apply. -type NodePoolSpecApplyConfiguration struct { - ClusterName *string `json:"clusterName,omitempty"` - Release *ReleaseApplyConfiguration `json:"release,omitempty"` - Platform *NodePoolPlatformApplyConfiguration `json:"platform,omitempty"` - NodeCount *int32 `json:"nodeCount,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - Management *NodePoolManagementApplyConfiguration `json:"management,omitempty"` - AutoScaling *NodePoolAutoScalingApplyConfiguration `json:"autoScaling,omitempty"` - Config []v1.LocalObjectReference `json:"config,omitempty"` - NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"` - NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"` - NodeLabels map[string]string `json:"nodeLabels,omitempty"` - Taints []TaintApplyConfiguration `json:"taints,omitempty"` - PausedUntil *string `json:"pausedUntil,omitempty"` - TuningConfig []v1.LocalObjectReference `json:"tuningConfig,omitempty"` - Arch *string `json:"arch,omitempty"` -} - -// NodePoolSpecApplyConfiguration constructs an declarative configuration of the NodePoolSpec type for use with -// apply. -func NodePoolSpec() *NodePoolSpecApplyConfiguration { - return &NodePoolSpecApplyConfiguration{} -} - -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithClusterName(value string) *NodePoolSpecApplyConfiguration { - b.ClusterName = &value - return b -} - -// WithRelease sets the Release field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Release field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithRelease(value *ReleaseApplyConfiguration) *NodePoolSpecApplyConfiguration { - b.Release = value - return b -} - -// WithPlatform sets the Platform field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Platform field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithPlatform(value *NodePoolPlatformApplyConfiguration) *NodePoolSpecApplyConfiguration { - b.Platform = value - return b -} - -// WithNodeCount sets the NodeCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeCount field is set to the value of the last call. 
-func (b *NodePoolSpecApplyConfiguration) WithNodeCount(value int32) *NodePoolSpecApplyConfiguration { - b.NodeCount = &value - return b -} - -// WithReplicas sets the Replicas field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Replicas field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithReplicas(value int32) *NodePoolSpecApplyConfiguration { - b.Replicas = &value - return b -} - -// WithManagement sets the Management field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Management field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithManagement(value *NodePoolManagementApplyConfiguration) *NodePoolSpecApplyConfiguration { - b.Management = value - return b -} - -// WithAutoScaling sets the AutoScaling field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AutoScaling field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithAutoScaling(value *NodePoolAutoScalingApplyConfiguration) *NodePoolSpecApplyConfiguration { - b.AutoScaling = value - return b -} - -// WithConfig adds the given value to the Config field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Config field. -func (b *NodePoolSpecApplyConfiguration) WithConfig(values ...v1.LocalObjectReference) *NodePoolSpecApplyConfiguration { - for i := range values { - b.Config = append(b.Config, values[i]) - } - return b -} - -// WithNodeDrainTimeout sets the NodeDrainTimeout field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeDrainTimeout field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithNodeDrainTimeout(value metav1.Duration) *NodePoolSpecApplyConfiguration { - b.NodeDrainTimeout = &value - return b -} - -// WithNodeVolumeDetachTimeout sets the NodeVolumeDetachTimeout field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeVolumeDetachTimeout field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithNodeVolumeDetachTimeout(value metav1.Duration) *NodePoolSpecApplyConfiguration { - b.NodeVolumeDetachTimeout = &value - return b -} - -// WithNodeLabels puts the entries into the NodeLabels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the NodeLabels field, -// overwriting an existing map entries in NodeLabels field with the same key. 
-func (b *NodePoolSpecApplyConfiguration) WithNodeLabels(entries map[string]string) *NodePoolSpecApplyConfiguration { - if b.NodeLabels == nil && len(entries) > 0 { - b.NodeLabels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.NodeLabels[k] = v - } - return b -} - -// WithTaints adds the given value to the Taints field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Taints field. -func (b *NodePoolSpecApplyConfiguration) WithTaints(values ...*TaintApplyConfiguration) *NodePoolSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithTaints") - } - b.Taints = append(b.Taints, *values[i]) - } - return b -} - -// WithPausedUntil sets the PausedUntil field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PausedUntil field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithPausedUntil(value string) *NodePoolSpecApplyConfiguration { - b.PausedUntil = &value - return b -} - -// WithTuningConfig adds the given value to the TuningConfig field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the TuningConfig field. -func (b *NodePoolSpecApplyConfiguration) WithTuningConfig(values ...v1.LocalObjectReference) *NodePoolSpecApplyConfiguration { - for i := range values { - b.TuningConfig = append(b.TuningConfig, values[i]) - } - return b -} - -// WithArch sets the Arch field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Arch field is set to the value of the last call. -func (b *NodePoolSpecApplyConfiguration) WithArch(value string) *NodePoolSpecApplyConfiguration { - b.Arch = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolstatus.go deleted file mode 100644 index 017322568..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodepoolstatus.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// NodePoolStatusApplyConfiguration represents an declarative configuration of the NodePoolStatus type for use -// with apply. 
-type NodePoolStatusApplyConfiguration struct { - Replicas *int32 `json:"replicas,omitempty"` - Version *string `json:"version,omitempty"` - Platform *NodePoolPlatformStatusApplyConfiguration `json:"platform,omitempty"` - Conditions []NodePoolConditionApplyConfiguration `json:"conditions,omitempty"` -} - -// NodePoolStatusApplyConfiguration constructs an declarative configuration of the NodePoolStatus type for use with -// apply. -func NodePoolStatus() *NodePoolStatusApplyConfiguration { - return &NodePoolStatusApplyConfiguration{} -} - -// WithReplicas sets the Replicas field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Replicas field is set to the value of the last call. -func (b *NodePoolStatusApplyConfiguration) WithReplicas(value int32) *NodePoolStatusApplyConfiguration { - b.Replicas = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *NodePoolStatusApplyConfiguration) WithVersion(value string) *NodePoolStatusApplyConfiguration { - b.Version = &value - return b -} - -// WithPlatform sets the Platform field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Platform field is set to the value of the last call. -func (b *NodePoolStatusApplyConfiguration) WithPlatform(value *NodePoolPlatformStatusApplyConfiguration) *NodePoolStatusApplyConfiguration { - b.Platform = value - return b -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *NodePoolStatusApplyConfiguration) WithConditions(values ...*NodePoolConditionApplyConfiguration) *NodePoolStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithConditions") - } - b.Conditions = append(b.Conditions, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodeportpublishingstrategy.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodeportpublishingstrategy.go deleted file mode 100644 index b51ffb843..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/nodeportpublishingstrategy.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -// NodePortPublishingStrategyApplyConfiguration represents an declarative configuration of the NodePortPublishingStrategy type for use -// with apply. -type NodePortPublishingStrategyApplyConfiguration struct { - Address *string `json:"address,omitempty"` - Port *int32 `json:"port,omitempty"` -} - -// NodePortPublishingStrategyApplyConfiguration constructs an declarative configuration of the NodePortPublishingStrategy type for use with -// apply. -func NodePortPublishingStrategy() *NodePortPublishingStrategyApplyConfiguration { - return &NodePortPublishingStrategyApplyConfiguration{} -} - -// WithAddress sets the Address field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Address field is set to the value of the last call. -func (b *NodePortPublishingStrategyApplyConfiguration) WithAddress(value string) *NodePortPublishingStrategyApplyConfiguration { - b.Address = &value - return b -} - -// WithPort sets the Port field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Port field is set to the value of the last call. -func (b *NodePortPublishingStrategyApplyConfiguration) WithPort(value int32) *NodePortPublishingStrategyApplyConfiguration { - b.Port = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/persistentvolumeetcdstoragespec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/persistentvolumeetcdstoragespec.go deleted file mode 100644 index ffd92d70f..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/persistentvolumeetcdstoragespec.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// PersistentVolumeEtcdStorageSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeEtcdStorageSpec type for use -// with apply. -type PersistentVolumeEtcdStorageSpecApplyConfiguration struct { - StorageClassName *string `json:"storageClassName,omitempty"` - Size *resource.Quantity `json:"size,omitempty"` -} - -// PersistentVolumeEtcdStorageSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeEtcdStorageSpec type for use with -// apply. -func PersistentVolumeEtcdStorageSpec() *PersistentVolumeEtcdStorageSpecApplyConfiguration { - return &PersistentVolumeEtcdStorageSpecApplyConfiguration{} -} - -// WithStorageClassName sets the StorageClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the StorageClassName field is set to the value of the last call. -func (b *PersistentVolumeEtcdStorageSpecApplyConfiguration) WithStorageClassName(value string) *PersistentVolumeEtcdStorageSpecApplyConfiguration { - b.StorageClassName = &value - return b -} - -// WithSize sets the Size field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Size field is set to the value of the last call. -func (b *PersistentVolumeEtcdStorageSpecApplyConfiguration) WithSize(value resource.Quantity) *PersistentVolumeEtcdStorageSpecApplyConfiguration { - b.Size = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformspec.go deleted file mode 100644 index 9c3137628..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformspec.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use -// with apply. -type PlatformSpecApplyConfiguration struct { - Type *v1alpha1.PlatformType `json:"type,omitempty"` - AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` - Agent *AgentPlatformSpecApplyConfiguration `json:"agent,omitempty"` - IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` - Azure *AzurePlatformSpecApplyConfiguration `json:"azure,omitempty"` - PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` - Kubevirt *KubevirtPlatformSpecApplyConfiguration `json:"kubevirt,omitempty"` -} - -// PlatformSpecApplyConfiguration constructs an declarative configuration of the PlatformSpec type for use with -// apply. -func PlatformSpec() *PlatformSpecApplyConfiguration { - return &PlatformSpecApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithType(value v1alpha1.PlatformType) *PlatformSpecApplyConfiguration { - b.Type = &value - return b -} - -// WithAWS sets the AWS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWS field is set to the value of the last call. 
-func (b *PlatformSpecApplyConfiguration) WithAWS(value *AWSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.AWS = value - return b -} - -// WithAgent sets the Agent field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Agent field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithAgent(value *AgentPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.Agent = value - return b -} - -// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IBMCloud field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value *IBMCloudPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.IBMCloud = value - return b -} - -// WithAzure sets the Azure field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Azure field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithAzure(value *AzurePlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.Azure = value - return b -} - -// WithPowerVS sets the PowerVS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PowerVS field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithPowerVS(value *PowerVSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.PowerVS = value - return b -} - -// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kubevirt field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithKubevirt(value *KubevirtPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { - b.Kubevirt = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformstatus.go deleted file mode 100644 index b0e9cc825..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/platformstatus.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// PlatformStatusApplyConfiguration represents an declarative configuration of the PlatformStatus type for use -// with apply. 
-type PlatformStatusApplyConfiguration struct { - AWS *AWSPlatformStatusApplyConfiguration `json:"aws,omitempty"` -} - -// PlatformStatusApplyConfiguration constructs an declarative configuration of the PlatformStatus type for use with -// apply. -func PlatformStatus() *PlatformStatusApplyConfiguration { - return &PlatformStatusApplyConfiguration{} -} - -// WithAWS sets the AWS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWS field is set to the value of the last call. -func (b *PlatformStatusApplyConfiguration) WithAWS(value *AWSPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration { - b.AWS = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsnodepoolplatform.go deleted file mode 100644 index bbd562002..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsnodepoolplatform.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// PowerVSNodePoolPlatformApplyConfiguration represents an declarative configuration of the PowerVSNodePoolPlatform type for use -// with apply. -type PowerVSNodePoolPlatformApplyConfiguration struct { - SystemType *string `json:"systemType,omitempty"` - ProcessorType *v1alpha1.PowerVSNodePoolProcType `json:"processorType,omitempty"` - Processors *intstr.IntOrString `json:"processors,omitempty"` - MemoryGiB *int32 `json:"memoryGiB,omitempty"` - Image *PowerVSResourceReferenceApplyConfiguration `json:"image,omitempty"` - StorageType *v1alpha1.PowerVSNodePoolStorageType `json:"storageType,omitempty"` - ImageDeletePolicy *v1alpha1.PowerVSNodePoolImageDeletePolicy `json:"imageDeletePolicy,omitempty"` -} - -// PowerVSNodePoolPlatformApplyConfiguration constructs an declarative configuration of the PowerVSNodePoolPlatform type for use with -// apply. -func PowerVSNodePoolPlatform() *PowerVSNodePoolPlatformApplyConfiguration { - return &PowerVSNodePoolPlatformApplyConfiguration{} -} - -// WithSystemType sets the SystemType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SystemType field is set to the value of the last call. 
-func (b *PowerVSNodePoolPlatformApplyConfiguration) WithSystemType(value string) *PowerVSNodePoolPlatformApplyConfiguration { - b.SystemType = &value - return b -} - -// WithProcessorType sets the ProcessorType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ProcessorType field is set to the value of the last call. -func (b *PowerVSNodePoolPlatformApplyConfiguration) WithProcessorType(value v1alpha1.PowerVSNodePoolProcType) *PowerVSNodePoolPlatformApplyConfiguration { - b.ProcessorType = &value - return b -} - -// WithProcessors sets the Processors field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Processors field is set to the value of the last call. -func (b *PowerVSNodePoolPlatformApplyConfiguration) WithProcessors(value intstr.IntOrString) *PowerVSNodePoolPlatformApplyConfiguration { - b.Processors = &value - return b -} - -// WithMemoryGiB sets the MemoryGiB field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MemoryGiB field is set to the value of the last call. -func (b *PowerVSNodePoolPlatformApplyConfiguration) WithMemoryGiB(value int32) *PowerVSNodePoolPlatformApplyConfiguration { - b.MemoryGiB = &value - return b -} - -// WithImage sets the Image field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Image field is set to the value of the last call. -func (b *PowerVSNodePoolPlatformApplyConfiguration) WithImage(value *PowerVSResourceReferenceApplyConfiguration) *PowerVSNodePoolPlatformApplyConfiguration { - b.Image = value - return b -} - -// WithStorageType sets the StorageType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageType field is set to the value of the last call. -func (b *PowerVSNodePoolPlatformApplyConfiguration) WithStorageType(value v1alpha1.PowerVSNodePoolStorageType) *PowerVSNodePoolPlatformApplyConfiguration { - b.StorageType = &value - return b -} - -// WithImageDeletePolicy sets the ImageDeletePolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ImageDeletePolicy field is set to the value of the last call. 
-func (b *PowerVSNodePoolPlatformApplyConfiguration) WithImageDeletePolicy(value v1alpha1.PowerVSNodePoolImageDeletePolicy) *PowerVSNodePoolPlatformApplyConfiguration { - b.ImageDeletePolicy = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsplatformspec.go deleted file mode 100644 index f60998305..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsplatformspec.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PowerVSPlatformSpecApplyConfiguration represents an declarative configuration of the PowerVSPlatformSpec type for use -// with apply. -type PowerVSPlatformSpecApplyConfiguration struct { - AccountID *string `json:"accountID,omitempty"` - CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"` - ResourceGroup *string `json:"resourceGroup,omitempty"` - Region *string `json:"region,omitempty"` - Zone *string `json:"zone,omitempty"` - Subnet *PowerVSResourceReferenceApplyConfiguration `json:"subnet,omitempty"` - ServiceInstanceID *string `json:"serviceInstanceID,omitempty"` - VPC *PowerVSVPCApplyConfiguration `json:"vpc,omitempty"` - KubeCloudControllerCreds *v1.LocalObjectReference `json:"kubeCloudControllerCreds,omitempty"` - NodePoolManagementCreds *v1.LocalObjectReference `json:"nodePoolManagementCreds,omitempty"` - IngressOperatorCloudCreds *v1.LocalObjectReference `json:"ingressOperatorCloudCreds,omitempty"` - StorageOperatorCloudCreds *v1.LocalObjectReference `json:"storageOperatorCloudCreds,omitempty"` - ImageRegistryOperatorCloudCreds *v1.LocalObjectReference `json:"imageRegistryOperatorCloudCreds,omitempty"` -} - -// PowerVSPlatformSpecApplyConfiguration constructs an declarative configuration of the PowerVSPlatformSpec type for use with -// apply. -func PowerVSPlatformSpec() *PowerVSPlatformSpecApplyConfiguration { - return &PowerVSPlatformSpecApplyConfiguration{} -} - -// WithAccountID sets the AccountID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AccountID field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithAccountID(value string) *PowerVSPlatformSpecApplyConfiguration { - b.AccountID = &value - return b -} - -// WithCISInstanceCRN sets the CISInstanceCRN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CISInstanceCRN field is set to the value of the last call. 
-func (b *PowerVSPlatformSpecApplyConfiguration) WithCISInstanceCRN(value string) *PowerVSPlatformSpecApplyConfiguration { - b.CISInstanceCRN = &value - return b -} - -// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceGroup field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithResourceGroup(value string) *PowerVSPlatformSpecApplyConfiguration { - b.ResourceGroup = &value - return b -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithRegion(value string) *PowerVSPlatformSpecApplyConfiguration { - b.Region = &value - return b -} - -// WithZone sets the Zone field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Zone field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithZone(value string) *PowerVSPlatformSpecApplyConfiguration { - b.Zone = &value - return b -} - -// WithSubnet sets the Subnet field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Subnet field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithSubnet(value *PowerVSResourceReferenceApplyConfiguration) *PowerVSPlatformSpecApplyConfiguration { - b.Subnet = value - return b -} - -// WithServiceInstanceID sets the ServiceInstanceID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ServiceInstanceID field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithServiceInstanceID(value string) *PowerVSPlatformSpecApplyConfiguration { - b.ServiceInstanceID = &value - return b -} - -// WithVPC sets the VPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VPC field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithVPC(value *PowerVSVPCApplyConfiguration) *PowerVSPlatformSpecApplyConfiguration { - b.VPC = value - return b -} - -// WithKubeCloudControllerCreds sets the KubeCloudControllerCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KubeCloudControllerCreds field is set to the value of the last call. 
-func (b *PowerVSPlatformSpecApplyConfiguration) WithKubeCloudControllerCreds(value v1.LocalObjectReference) *PowerVSPlatformSpecApplyConfiguration { - b.KubeCloudControllerCreds = &value - return b -} - -// WithNodePoolManagementCreds sets the NodePoolManagementCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodePoolManagementCreds field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithNodePoolManagementCreds(value v1.LocalObjectReference) *PowerVSPlatformSpecApplyConfiguration { - b.NodePoolManagementCreds = &value - return b -} - -// WithIngressOperatorCloudCreds sets the IngressOperatorCloudCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IngressOperatorCloudCreds field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithIngressOperatorCloudCreds(value v1.LocalObjectReference) *PowerVSPlatformSpecApplyConfiguration { - b.IngressOperatorCloudCreds = &value - return b -} - -// WithStorageOperatorCloudCreds sets the StorageOperatorCloudCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageOperatorCloudCreds field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithStorageOperatorCloudCreds(value v1.LocalObjectReference) *PowerVSPlatformSpecApplyConfiguration { - b.StorageOperatorCloudCreds = &value - return b -} - -// WithImageRegistryOperatorCloudCreds sets the ImageRegistryOperatorCloudCreds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ImageRegistryOperatorCloudCreds field is set to the value of the last call. -func (b *PowerVSPlatformSpecApplyConfiguration) WithImageRegistryOperatorCloudCreds(value v1.LocalObjectReference) *PowerVSPlatformSpecApplyConfiguration { - b.ImageRegistryOperatorCloudCreds = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsvpc.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsvpc.go deleted file mode 100644 index c5a03eb00..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsvpc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// PowerVSVPCApplyConfiguration represents an declarative configuration of the PowerVSVPC type for use -// with apply. 
-type PowerVSVPCApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Region *string `json:"region,omitempty"` - Zone *string `json:"zone,omitempty"` - Subnet *string `json:"subnet,omitempty"` -} - -// PowerVSVPCApplyConfiguration constructs an declarative configuration of the PowerVSVPC type for use with -// apply. -func PowerVSVPC() *PowerVSVPCApplyConfiguration { - return &PowerVSVPCApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PowerVSVPCApplyConfiguration) WithName(value string) *PowerVSVPCApplyConfiguration { - b.Name = &value - return b -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. -func (b *PowerVSVPCApplyConfiguration) WithRegion(value string) *PowerVSVPCApplyConfiguration { - b.Region = &value - return b -} - -// WithZone sets the Zone field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Zone field is set to the value of the last call. -func (b *PowerVSVPCApplyConfiguration) WithZone(value string) *PowerVSVPCApplyConfiguration { - b.Zone = &value - return b -} - -// WithSubnet sets the Subnet field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Subnet field is set to the value of the last call. -func (b *PowerVSVPCApplyConfiguration) WithSubnet(value string) *PowerVSVPCApplyConfiguration { - b.Subnet = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/release.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/release.go deleted file mode 100644 index deceff7de..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/release.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// ReleaseApplyConfiguration represents an declarative configuration of the Release type for use -// with apply. -type ReleaseApplyConfiguration struct { - Image *string `json:"image,omitempty"` -} - -// ReleaseApplyConfiguration constructs an declarative configuration of the Release type for use with -// apply. 
-func Release() *ReleaseApplyConfiguration { - return &ReleaseApplyConfiguration{} -} - -// WithImage sets the Image field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Image field is set to the value of the last call. -func (b *ReleaseApplyConfiguration) WithImage(value string) *ReleaseApplyConfiguration { - b.Image = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/replaceupgrade.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/replaceupgrade.go deleted file mode 100644 index f48a538dd..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/replaceupgrade.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// ReplaceUpgradeApplyConfiguration represents an declarative configuration of the ReplaceUpgrade type for use -// with apply. -type ReplaceUpgradeApplyConfiguration struct { - Strategy *v1alpha1.UpgradeStrategy `json:"strategy,omitempty"` - RollingUpdate *RollingUpdateApplyConfiguration `json:"rollingUpdate,omitempty"` -} - -// ReplaceUpgradeApplyConfiguration constructs an declarative configuration of the ReplaceUpgrade type for use with -// apply. -func ReplaceUpgrade() *ReplaceUpgradeApplyConfiguration { - return &ReplaceUpgradeApplyConfiguration{} -} - -// WithStrategy sets the Strategy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Strategy field is set to the value of the last call. -func (b *ReplaceUpgradeApplyConfiguration) WithStrategy(value v1alpha1.UpgradeStrategy) *ReplaceUpgradeApplyConfiguration { - b.Strategy = &value - return b -} - -// WithRollingUpdate sets the RollingUpdate field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RollingUpdate field is set to the value of the last call. 
-func (b *ReplaceUpgradeApplyConfiguration) WithRollingUpdate(value *RollingUpdateApplyConfiguration) *ReplaceUpgradeApplyConfiguration { - b.RollingUpdate = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/rollingupdate.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/rollingupdate.go deleted file mode 100644 index 7e55d889b..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/rollingupdate.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// RollingUpdateApplyConfiguration represents an declarative configuration of the RollingUpdate type for use -// with apply. -type RollingUpdateApplyConfiguration struct { - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` -} - -// RollingUpdateApplyConfiguration constructs an declarative configuration of the RollingUpdate type for use with -// apply. -func RollingUpdate() *RollingUpdateApplyConfiguration { - return &RollingUpdateApplyConfiguration{} -} - -// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MaxUnavailable field is set to the value of the last call. -func (b *RollingUpdateApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateApplyConfiguration { - b.MaxUnavailable = &value - return b -} - -// WithMaxSurge sets the MaxSurge field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MaxSurge field is set to the value of the last call. -func (b *RollingUpdateApplyConfiguration) WithMaxSurge(value intstr.IntOrString) *RollingUpdateApplyConfiguration { - b.MaxSurge = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/routepublishingstrategy.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/routepublishingstrategy.go deleted file mode 100644 index 5769cb803..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/routepublishingstrategy.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// RoutePublishingStrategyApplyConfiguration represents an declarative configuration of the RoutePublishingStrategy type for use -// with apply. -type RoutePublishingStrategyApplyConfiguration struct { - Hostname *string `json:"hostname,omitempty"` -} - -// RoutePublishingStrategyApplyConfiguration constructs an declarative configuration of the RoutePublishingStrategy type for use with -// apply. -func RoutePublishingStrategy() *RoutePublishingStrategyApplyConfiguration { - return &RoutePublishingStrategyApplyConfiguration{} -} - -// WithHostname sets the Hostname field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Hostname field is set to the value of the last call. -func (b *RoutePublishingStrategyApplyConfiguration) WithHostname(value string) *RoutePublishingStrategyApplyConfiguration { - b.Hostname = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/secretencryptionspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/secretencryptionspec.go deleted file mode 100644 index f38595426..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/secretencryptionspec.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// SecretEncryptionSpecApplyConfiguration represents an declarative configuration of the SecretEncryptionSpec type for use -// with apply. -type SecretEncryptionSpecApplyConfiguration struct { - Type *v1alpha1.SecretEncryptionType `json:"type,omitempty"` - KMS *KMSSpecApplyConfiguration `json:"kms,omitempty"` - AESCBC *AESCBCSpecApplyConfiguration `json:"aescbc,omitempty"` -} - -// SecretEncryptionSpecApplyConfiguration constructs an declarative configuration of the SecretEncryptionSpec type for use with -// apply. -func SecretEncryptionSpec() *SecretEncryptionSpecApplyConfiguration { - return &SecretEncryptionSpecApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. 
-func (b *SecretEncryptionSpecApplyConfiguration) WithType(value v1alpha1.SecretEncryptionType) *SecretEncryptionSpecApplyConfiguration { - b.Type = &value - return b -} - -// WithKMS sets the KMS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KMS field is set to the value of the last call. -func (b *SecretEncryptionSpecApplyConfiguration) WithKMS(value *KMSSpecApplyConfiguration) *SecretEncryptionSpecApplyConfiguration { - b.KMS = value - return b -} - -// WithAESCBC sets the AESCBC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AESCBC field is set to the value of the last call. -func (b *SecretEncryptionSpecApplyConfiguration) WithAESCBC(value *AESCBCSpecApplyConfiguration) *SecretEncryptionSpecApplyConfiguration { - b.AESCBC = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicenetworkentry.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicenetworkentry.go deleted file mode 100644 index 8001c6d88..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicenetworkentry.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - ipnet "github.com/openshift/hypershift/api/util/ipnet" -) - -// ServiceNetworkEntryApplyConfiguration represents an declarative configuration of the ServiceNetworkEntry type for use -// with apply. -type ServiceNetworkEntryApplyConfiguration struct { - CIDR *ipnet.IPNet `json:"cidr,omitempty"` -} - -// ServiceNetworkEntryApplyConfiguration constructs an declarative configuration of the ServiceNetworkEntry type for use with -// apply. -func ServiceNetworkEntry() *ServiceNetworkEntryApplyConfiguration { - return &ServiceNetworkEntryApplyConfiguration{} -} - -// WithCIDR sets the CIDR field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CIDR field is set to the value of the last call. 
-func (b *ServiceNetworkEntryApplyConfiguration) WithCIDR(value ipnet.IPNet) *ServiceNetworkEntryApplyConfiguration { - b.CIDR = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategy.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategy.go deleted file mode 100644 index 6140a9dc6..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategy.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// ServicePublishingStrategyApplyConfiguration represents an declarative configuration of the ServicePublishingStrategy type for use -// with apply. -type ServicePublishingStrategyApplyConfiguration struct { - Type *v1alpha1.PublishingStrategyType `json:"type,omitempty"` - NodePort *NodePortPublishingStrategyApplyConfiguration `json:"nodePort,omitempty"` - LoadBalancer *LoadBalancerPublishingStrategyApplyConfiguration `json:"loadBalancer,omitempty"` - Route *RoutePublishingStrategyApplyConfiguration `json:"route,omitempty"` -} - -// ServicePublishingStrategyApplyConfiguration constructs an declarative configuration of the ServicePublishingStrategy type for use with -// apply. -func ServicePublishingStrategy() *ServicePublishingStrategyApplyConfiguration { - return &ServicePublishingStrategyApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *ServicePublishingStrategyApplyConfiguration) WithType(value v1alpha1.PublishingStrategyType) *ServicePublishingStrategyApplyConfiguration { - b.Type = &value - return b -} - -// WithNodePort sets the NodePort field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodePort field is set to the value of the last call. -func (b *ServicePublishingStrategyApplyConfiguration) WithNodePort(value *NodePortPublishingStrategyApplyConfiguration) *ServicePublishingStrategyApplyConfiguration { - b.NodePort = value - return b -} - -// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LoadBalancer field is set to the value of the last call. 
-func (b *ServicePublishingStrategyApplyConfiguration) WithLoadBalancer(value *LoadBalancerPublishingStrategyApplyConfiguration) *ServicePublishingStrategyApplyConfiguration { - b.LoadBalancer = value - return b -} - -// WithRoute sets the Route field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Route field is set to the value of the last call. -func (b *ServicePublishingStrategyApplyConfiguration) WithRoute(value *RoutePublishingStrategyApplyConfiguration) *ServicePublishingStrategyApplyConfiguration { - b.Route = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategymapping.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategymapping.go deleted file mode 100644 index 8d36a43e8..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/servicepublishingstrategymapping.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" -) - -// ServicePublishingStrategyMappingApplyConfiguration represents an declarative configuration of the ServicePublishingStrategyMapping type for use -// with apply. -type ServicePublishingStrategyMappingApplyConfiguration struct { - Service *v1alpha1.ServiceType `json:"service,omitempty"` - *ServicePublishingStrategyApplyConfiguration `json:"servicePublishingStrategy,omitempty"` -} - -// ServicePublishingStrategyMappingApplyConfiguration constructs an declarative configuration of the ServicePublishingStrategyMapping type for use with -// apply. -func ServicePublishingStrategyMapping() *ServicePublishingStrategyMappingApplyConfiguration { - return &ServicePublishingStrategyMappingApplyConfiguration{} -} - -// WithService sets the Service field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Service field is set to the value of the last call. -func (b *ServicePublishingStrategyMappingApplyConfiguration) WithService(value v1alpha1.ServiceType) *ServicePublishingStrategyMappingApplyConfiguration { - b.Service = &value - return b -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. 
-func (b *ServicePublishingStrategyMappingApplyConfiguration) WithType(value v1alpha1.PublishingStrategyType) *ServicePublishingStrategyMappingApplyConfiguration { - b.ensureServicePublishingStrategyApplyConfigurationExists() - b.Type = &value - return b -} - -// WithNodePort sets the NodePort field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodePort field is set to the value of the last call. -func (b *ServicePublishingStrategyMappingApplyConfiguration) WithNodePort(value *NodePortPublishingStrategyApplyConfiguration) *ServicePublishingStrategyMappingApplyConfiguration { - b.ensureServicePublishingStrategyApplyConfigurationExists() - b.NodePort = value - return b -} - -// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *ServicePublishingStrategyMappingApplyConfiguration) WithLoadBalancer(value *LoadBalancerPublishingStrategyApplyConfiguration) *ServicePublishingStrategyMappingApplyConfiguration { - b.ensureServicePublishingStrategyApplyConfigurationExists() - b.LoadBalancer = value - return b -} - -// WithRoute sets the Route field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Route field is set to the value of the last call. -func (b *ServicePublishingStrategyMappingApplyConfiguration) WithRoute(value *RoutePublishingStrategyApplyConfiguration) *ServicePublishingStrategyMappingApplyConfiguration { - b.ensureServicePublishingStrategyApplyConfigurationExists() - b.Route = value - return b -} - -func (b *ServicePublishingStrategyMappingApplyConfiguration) ensureServicePublishingStrategyApplyConfigurationExists() { - if b.ServicePublishingStrategyApplyConfiguration == nil { - b.ServicePublishingStrategyApplyConfiguration = &ServicePublishingStrategyApplyConfiguration{} - } -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/taint.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/taint.go deleted file mode 100644 index f02023d9e..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/taint.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use -// with apply. 
-type TaintApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` - Effect *v1.TaintEffect `json:"effect,omitempty"` -} - -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with -// apply. -func Taint() *TaintApplyConfiguration { - return &TaintApplyConfiguration{} -} - -// WithKey sets the Key field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Key field is set to the value of the last call. -func (b *TaintApplyConfiguration) WithKey(value string) *TaintApplyConfiguration { - b.Key = &value - return b -} - -// WithValue sets the Value field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Value field is set to the value of the last call. -func (b *TaintApplyConfiguration) WithValue(value string) *TaintApplyConfiguration { - b.Value = &value - return b -} - -// WithEffect sets the Effect field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Effect field is set to the value of the last call. -func (b *TaintApplyConfiguration) WithEffect(value v1.TaintEffect) *TaintApplyConfiguration { - b.Effect = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/unmanagedetcdspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/unmanagedetcdspec.go deleted file mode 100644 index 91d1d6752..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/unmanagedetcdspec.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// UnmanagedEtcdSpecApplyConfiguration represents an declarative configuration of the UnmanagedEtcdSpec type for use -// with apply. -type UnmanagedEtcdSpecApplyConfiguration struct { - Endpoint *string `json:"endpoint,omitempty"` - TLS *EtcdTLSConfigApplyConfiguration `json:"tls,omitempty"` -} - -// UnmanagedEtcdSpecApplyConfiguration constructs an declarative configuration of the UnmanagedEtcdSpec type for use with -// apply. -func UnmanagedEtcdSpec() *UnmanagedEtcdSpecApplyConfiguration { - return &UnmanagedEtcdSpecApplyConfiguration{} -} - -// WithEndpoint sets the Endpoint field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Endpoint field is set to the value of the last call. 
-func (b *UnmanagedEtcdSpecApplyConfiguration) WithEndpoint(value string) *UnmanagedEtcdSpecApplyConfiguration { - b.Endpoint = &value - return b -} - -// WithTLS sets the TLS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the TLS field is set to the value of the last call. -func (b *UnmanagedEtcdSpecApplyConfiguration) WithTLS(value *EtcdTLSConfigApplyConfiguration) *UnmanagedEtcdSpecApplyConfiguration { - b.TLS = value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/volume.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/volume.go deleted file mode 100644 index 22c2e43f2..000000000 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/volume.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use -// with apply. -type VolumeApplyConfiguration struct { - Size *int64 `json:"size,omitempty"` - Type *string `json:"type,omitempty"` - IOPS *int64 `json:"iops,omitempty"` - Encrypted *bool `json:"encrypted,omitempty"` - EncryptionKey *string `json:"encryptionKey,omitempty"` -} - -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with -// apply. -func Volume() *VolumeApplyConfiguration { - return &VolumeApplyConfiguration{} -} - -// WithSize sets the Size field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Size field is set to the value of the last call. -func (b *VolumeApplyConfiguration) WithSize(value int64) *VolumeApplyConfiguration { - b.Size = &value - return b -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *VolumeApplyConfiguration) WithType(value string) *VolumeApplyConfiguration { - b.Type = &value - return b -} - -// WithIOPS sets the IOPS field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IOPS field is set to the value of the last call. -func (b *VolumeApplyConfiguration) WithIOPS(value int64) *VolumeApplyConfiguration { - b.IOPS = &value - return b -} - -// WithEncrypted sets the Encrypted field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Encrypted field is set to the value of the last call. -func (b *VolumeApplyConfiguration) WithEncrypted(value bool) *VolumeApplyConfiguration { - b.Encrypted = &value - return b -} - -// WithEncryptionKey sets the EncryptionKey field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the EncryptionKey field is set to the value of the last call. -func (b *VolumeApplyConfiguration) WithEncryptionKey(value string) *VolumeApplyConfiguration { - b.EncryptionKey = &value - return b -} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/allocationpool.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/allocationpool.go new file mode 100644 index 000000000..9ea024cb4 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/allocationpool.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AllocationPoolApplyConfiguration represents an declarative configuration of the AllocationPool type for use +// with apply. +type AllocationPoolApplyConfiguration struct { + Start *string `json:"start,omitempty"` + End *string `json:"end,omitempty"` +} + +// AllocationPoolApplyConfiguration constructs an declarative configuration of the AllocationPool type for use with +// apply. +func AllocationPool() *AllocationPoolApplyConfiguration { + return &AllocationPoolApplyConfiguration{} +} + +// WithStart sets the Start field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Start field is set to the value of the last call. +func (b *AllocationPoolApplyConfiguration) WithStart(value string) *AllocationPoolApplyConfiguration { + b.Start = &value + return b +} + +// WithEnd sets the End field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the End field is set to the value of the last call. 
+func (b *AllocationPoolApplyConfiguration) WithEnd(value string) *AllocationPoolApplyConfiguration { + b.End = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go index 5c9131e6b..d9caedfab 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go @@ -27,6 +27,7 @@ type AWSNodePoolPlatformApplyConfiguration struct { SecurityGroups []AWSResourceReferenceApplyConfiguration `json:"securityGroups,omitempty"` RootVolume *VolumeApplyConfiguration `json:"rootVolume,omitempty"` ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + Placement *PlacementOptionsApplyConfiguration `json:"placement,omitempty"` } // AWSNodePoolPlatformApplyConfiguration constructs an declarative configuration of the AWSNodePoolPlatform type for use with @@ -100,3 +101,11 @@ func (b *AWSNodePoolPlatformApplyConfiguration) WithResourceTags(values ...*AWSR } return b } + +// WithPlacement sets the Placement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Placement field is set to the value of the last call. +func (b *AWSNodePoolPlatformApplyConfiguration) WithPlacement(value *PlacementOptionsApplyConfiguration) *AWSNodePoolPlatformApplyConfiguration { + b.Placement = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go index 07a51c02d..cc11a9497 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go @@ -32,6 +32,7 @@ type AWSPlatformSpecApplyConfiguration struct { EndpointAccess *hypershiftv1beta1.AWSEndpointAccessType `json:"endpointAccess,omitempty"` AdditionalAllowedPrincipals []string `json:"additionalAllowedPrincipals,omitempty"` MultiArch *bool `json:"multiArch,omitempty"` + SharedVPC *AWSSharedVPCApplyConfiguration `json:"sharedVPC,omitempty"` } // AWSPlatformSpecApplyConfiguration constructs an declarative configuration of the AWSPlatformSpec type for use with @@ -115,3 +116,11 @@ func (b *AWSPlatformSpecApplyConfiguration) WithMultiArch(value bool) *AWSPlatfo b.MultiArch = &value return b } + +// WithSharedVPC sets the SharedVPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SharedVPC field is set to the value of the last call. 
+func (b *AWSPlatformSpecApplyConfiguration) WithSharedVPC(value *AWSSharedVPCApplyConfiguration) *AWSPlatformSpecApplyConfiguration { + b.SharedVPC = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go new file mode 100644 index 000000000..70a0a5750 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AWSSharedVPCApplyConfiguration represents an declarative configuration of the AWSSharedVPC type for use +// with apply. +type AWSSharedVPCApplyConfiguration struct { + RolesRef *AWSSharedVPCRolesRefApplyConfiguration `json:"rolesRef,omitempty"` + LocalZoneID *string `json:"localZoneID,omitempty"` +} + +// AWSSharedVPCApplyConfiguration constructs an declarative configuration of the AWSSharedVPC type for use with +// apply. +func AWSSharedVPC() *AWSSharedVPCApplyConfiguration { + return &AWSSharedVPCApplyConfiguration{} +} + +// WithRolesRef sets the RolesRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RolesRef field is set to the value of the last call. +func (b *AWSSharedVPCApplyConfiguration) WithRolesRef(value *AWSSharedVPCRolesRefApplyConfiguration) *AWSSharedVPCApplyConfiguration { + b.RolesRef = value + return b +} + +// WithLocalZoneID sets the LocalZoneID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LocalZoneID field is set to the value of the last call. +func (b *AWSSharedVPCApplyConfiguration) WithLocalZoneID(value string) *AWSSharedVPCApplyConfiguration { + b.LocalZoneID = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpcrolesref.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpcrolesref.go new file mode 100644 index 000000000..2968acab3 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/awssharedvpcrolesref.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AWSSharedVPCRolesRefApplyConfiguration represents an declarative configuration of the AWSSharedVPCRolesRef type for use +// with apply. +type AWSSharedVPCRolesRefApplyConfiguration struct { + IngressARN *string `json:"ingressARN,omitempty"` + ControlPlaneARN *string `json:"controlPlaneARN,omitempty"` +} + +// AWSSharedVPCRolesRefApplyConfiguration constructs an declarative configuration of the AWSSharedVPCRolesRef type for use with +// apply. +func AWSSharedVPCRolesRef() *AWSSharedVPCRolesRefApplyConfiguration { + return &AWSSharedVPCRolesRefApplyConfiguration{} +} + +// WithIngressARN sets the IngressARN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IngressARN field is set to the value of the last call. +func (b *AWSSharedVPCRolesRefApplyConfiguration) WithIngressARN(value string) *AWSSharedVPCRolesRefApplyConfiguration { + b.IngressARN = &value + return b +} + +// WithControlPlaneARN sets the ControlPlaneARN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ControlPlaneARN field is set to the value of the last call. +func (b *AWSSharedVPCRolesRefApplyConfiguration) WithControlPlaneARN(value string) *AWSSharedVPCRolesRefApplyConfiguration { + b.ControlPlaneARN = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go index d052f44a6..9a0f54940 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go @@ -20,8 +20,9 @@ package v1beta1 // AzureKMSSpecApplyConfiguration represents an declarative configuration of the AzureKMSSpec type for use // with apply. type AzureKMSSpecApplyConfiguration struct { - ActiveKey *AzureKMSKeyApplyConfiguration `json:"activeKey,omitempty"` - BackupKey *AzureKMSKeyApplyConfiguration `json:"backupKey,omitempty"` + ActiveKey *AzureKMSKeyApplyConfiguration `json:"activeKey,omitempty"` + BackupKey *AzureKMSKeyApplyConfiguration `json:"backupKey,omitempty"` + KMS *ManagedIdentityApplyConfiguration `json:"kms,omitempty"` } // AzureKMSSpecApplyConfiguration constructs an declarative configuration of the AzureKMSSpec type for use with @@ -45,3 +46,11 @@ func (b *AzureKMSSpecApplyConfiguration) WithBackupKey(value *AzureKMSKeyApplyCo b.BackupKey = value return b } + +// WithKMS sets the KMS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMS field is set to the value of the last call. 
+func (b *AzureKMSSpecApplyConfiguration) WithKMS(value *ManagedIdentityApplyConfiguration) *AzureKMSSpecApplyConfiguration { + b.KMS = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azuremarketplaceimage.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azuremarketplaceimage.go new file mode 100644 index 000000000..9dd0e64d4 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azuremarketplaceimage.go @@ -0,0 +1,65 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AzureMarketplaceImageApplyConfiguration represents an declarative configuration of the AzureMarketplaceImage type for use +// with apply. +type AzureMarketplaceImageApplyConfiguration struct { + Publisher *string `json:"publisher,omitempty"` + Offer *string `json:"offer,omitempty"` + SKU *string `json:"sku,omitempty"` + Version *string `json:"version,omitempty"` +} + +// AzureMarketplaceImageApplyConfiguration constructs an declarative configuration of the AzureMarketplaceImage type for use with +// apply. +func AzureMarketplaceImage() *AzureMarketplaceImageApplyConfiguration { + return &AzureMarketplaceImageApplyConfiguration{} +} + +// WithPublisher sets the Publisher field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Publisher field is set to the value of the last call. +func (b *AzureMarketplaceImageApplyConfiguration) WithPublisher(value string) *AzureMarketplaceImageApplyConfiguration { + b.Publisher = &value + return b +} + +// WithOffer sets the Offer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Offer field is set to the value of the last call. +func (b *AzureMarketplaceImageApplyConfiguration) WithOffer(value string) *AzureMarketplaceImageApplyConfiguration { + b.Offer = &value + return b +} + +// WithSKU sets the SKU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SKU field is set to the value of the last call. +func (b *AzureMarketplaceImageApplyConfiguration) WithSKU(value string) *AzureMarketplaceImageApplyConfiguration { + b.SKU = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. 
+func (b *AzureMarketplaceImageApplyConfiguration) WithVersion(value string) *AzureMarketplaceImageApplyConfiguration { + b.Version = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolosdisk.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolosdisk.go new file mode 100644 index 000000000..6133a83fc --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolosdisk.go @@ -0,0 +1,69 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// AzureNodePoolOSDiskApplyConfiguration represents an declarative configuration of the AzureNodePoolOSDisk type for use +// with apply. +type AzureNodePoolOSDiskApplyConfiguration struct { + SizeGiB *int32 `json:"sizeGiB,omitempty"` + DiskStorageAccountType *v1beta1.AzureDiskStorageAccountType `json:"diskStorageAccountType,omitempty"` + EncryptionSetID *string `json:"encryptionSetID,omitempty"` + Persistence *v1beta1.AzureDiskPersistence `json:"persistence,omitempty"` +} + +// AzureNodePoolOSDiskApplyConfiguration constructs an declarative configuration of the AzureNodePoolOSDisk type for use with +// apply. +func AzureNodePoolOSDisk() *AzureNodePoolOSDiskApplyConfiguration { + return &AzureNodePoolOSDiskApplyConfiguration{} +} + +// WithSizeGiB sets the SizeGiB field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SizeGiB field is set to the value of the last call. +func (b *AzureNodePoolOSDiskApplyConfiguration) WithSizeGiB(value int32) *AzureNodePoolOSDiskApplyConfiguration { + b.SizeGiB = &value + return b +} + +// WithDiskStorageAccountType sets the DiskStorageAccountType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DiskStorageAccountType field is set to the value of the last call. +func (b *AzureNodePoolOSDiskApplyConfiguration) WithDiskStorageAccountType(value v1beta1.AzureDiskStorageAccountType) *AzureNodePoolOSDiskApplyConfiguration { + b.DiskStorageAccountType = &value + return b +} + +// WithEncryptionSetID sets the EncryptionSetID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EncryptionSetID field is set to the value of the last call. 
+func (b *AzureNodePoolOSDiskApplyConfiguration) WithEncryptionSetID(value string) *AzureNodePoolOSDiskApplyConfiguration { + b.EncryptionSetID = &value + return b +} + +// WithPersistence sets the Persistence field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Persistence field is set to the value of the last call. +func (b *AzureNodePoolOSDiskApplyConfiguration) WithPersistence(value v1beta1.AzureDiskPersistence) *AzureNodePoolOSDiskApplyConfiguration { + b.Persistence = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go index 53bd0ba06..4040add23 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go @@ -20,15 +20,14 @@ package v1beta1 // AzureNodePoolPlatformApplyConfiguration represents an declarative configuration of the AzureNodePoolPlatform type for use // with apply. type AzureNodePoolPlatformApplyConfiguration struct { - VMSize *string `json:"vmsize,omitempty"` - ImageID *string `json:"imageID,omitempty"` - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - DiskStorageAccountType *string `json:"diskStorageAccountType,omitempty"` - AvailabilityZone *string `json:"availabilityZone,omitempty"` - DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` - EnableEphemeralOSDisk *bool `json:"enableEphemeralOSDisk,omitempty"` - SubnetID *string `json:"subnetID,omitempty"` - Diagnostics *DiagnosticsApplyConfiguration `json:"diagnostics,omitempty"` + VMSize *string `json:"vmSize,omitempty"` + Image *AzureVMImageApplyConfiguration `json:"image,omitempty"` + OSDisk *AzureNodePoolOSDiskApplyConfiguration `json:"osDisk,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + EncryptionAtHost *string `json:"encryptionAtHost,omitempty"` + SubnetID *string `json:"subnetID,omitempty"` + Diagnostics *DiagnosticsApplyConfiguration `json:"diagnostics,omitempty"` + MachineIdentityID *string `json:"machineIdentityID,omitempty"` } // AzureNodePoolPlatformApplyConfiguration constructs an declarative configuration of the AzureNodePoolPlatform type for use with @@ -45,27 +44,19 @@ func (b *AzureNodePoolPlatformApplyConfiguration) WithVMSize(value string) *Azur return b } -// WithImageID sets the ImageID field in the declarative configuration to the given value +// WithImage sets the Image field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ImageID field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithImageID(value string) *AzureNodePoolPlatformApplyConfiguration { - b.ImageID = &value +// If called multiple times, the Image field is set to the value of the last call. 
+func (b *AzureNodePoolPlatformApplyConfiguration) WithImage(value *AzureVMImageApplyConfiguration) *AzureNodePoolPlatformApplyConfiguration { + b.Image = value return b } -// WithDiskSizeGB sets the DiskSizeGB field in the declarative configuration to the given value +// WithOSDisk sets the OSDisk field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskSizeGB field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskSizeGB(value int32) *AzureNodePoolPlatformApplyConfiguration { - b.DiskSizeGB = &value - return b -} - -// WithDiskStorageAccountType sets the DiskStorageAccountType field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskStorageAccountType field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskStorageAccountType(value string) *AzureNodePoolPlatformApplyConfiguration { - b.DiskStorageAccountType = &value +// If called multiple times, the OSDisk field is set to the value of the last call. +func (b *AzureNodePoolPlatformApplyConfiguration) WithOSDisk(value *AzureNodePoolOSDiskApplyConfiguration) *AzureNodePoolPlatformApplyConfiguration { + b.OSDisk = value return b } @@ -77,19 +68,11 @@ func (b *AzureNodePoolPlatformApplyConfiguration) WithAvailabilityZone(value str return b } -// WithDiskEncryptionSetID sets the DiskEncryptionSetID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DiskEncryptionSetID field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithDiskEncryptionSetID(value string) *AzureNodePoolPlatformApplyConfiguration { - b.DiskEncryptionSetID = &value - return b -} - -// WithEnableEphemeralOSDisk sets the EnableEphemeralOSDisk field in the declarative configuration to the given value +// WithEncryptionAtHost sets the EncryptionAtHost field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the EnableEphemeralOSDisk field is set to the value of the last call. -func (b *AzureNodePoolPlatformApplyConfiguration) WithEnableEphemeralOSDisk(value bool) *AzureNodePoolPlatformApplyConfiguration { - b.EnableEphemeralOSDisk = &value +// If called multiple times, the EncryptionAtHost field is set to the value of the last call. +func (b *AzureNodePoolPlatformApplyConfiguration) WithEncryptionAtHost(value string) *AzureNodePoolPlatformApplyConfiguration { + b.EncryptionAtHost = &value return b } @@ -108,3 +91,11 @@ func (b *AzureNodePoolPlatformApplyConfiguration) WithDiagnostics(value *Diagnos b.Diagnostics = value return b } + +// WithMachineIdentityID sets the MachineIdentityID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineIdentityID field is set to the value of the last call. 
+func (b *AzureNodePoolPlatformApplyConfiguration) WithMachineIdentityID(value string) *AzureNodePoolPlatformApplyConfiguration { + b.MachineIdentityID = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go index 731ae9dd8..3d105baae 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go @@ -24,15 +24,15 @@ import ( // AzurePlatformSpecApplyConfiguration represents an declarative configuration of the AzurePlatformSpec type for use // with apply. type AzurePlatformSpecApplyConfiguration struct { - Credentials *v1.LocalObjectReference `json:"credentials,omitempty"` - Cloud *string `json:"cloud,omitempty"` - Location *string `json:"location,omitempty"` - ResourceGroupName *string `json:"resourceGroup,omitempty"` - VnetID *string `json:"vnetID,omitempty"` - SubnetID *string `json:"subnetID,omitempty"` - SubscriptionID *string `json:"subscriptionID,omitempty"` - MachineIdentityID *string `json:"machineIdentityID,omitempty"` - SecurityGroupID *string `json:"securityGroupID,omitempty"` + Credentials *v1.LocalObjectReference `json:"credentials,omitempty"` + Cloud *string `json:"cloud,omitempty"` + Location *string `json:"location,omitempty"` + ResourceGroupName *string `json:"resourceGroup,omitempty"` + VnetID *string `json:"vnetID,omitempty"` + SubnetID *string `json:"subnetID,omitempty"` + SubscriptionID *string `json:"subscriptionID,omitempty"` + SecurityGroupID *string `json:"securityGroupID,omitempty"` + ManagedIdentities *AzureResourceManagedIdentitiesApplyConfiguration `json:"managedIdentities,omitempty"` } // AzurePlatformSpecApplyConfiguration constructs an declarative configuration of the AzurePlatformSpec type for use with @@ -97,14 +97,6 @@ func (b *AzurePlatformSpecApplyConfiguration) WithSubscriptionID(value string) * return b } -// WithMachineIdentityID sets the MachineIdentityID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MachineIdentityID field is set to the value of the last call. -func (b *AzurePlatformSpecApplyConfiguration) WithMachineIdentityID(value string) *AzurePlatformSpecApplyConfiguration { - b.MachineIdentityID = &value - return b -} - // WithSecurityGroupID sets the SecurityGroupID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SecurityGroupID field is set to the value of the last call. @@ -112,3 +104,11 @@ func (b *AzurePlatformSpecApplyConfiguration) WithSecurityGroupID(value string) b.SecurityGroupID = &value return b } + +// WithManagedIdentities sets the ManagedIdentities field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedIdentities field is set to the value of the last call. 
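A brief usage sketch of the reshaped Azure node-pool builder above: the With* methods chain, and the new Image and OSDisk fields take their own sub-builders. The AzureNodePoolPlatform() and AzureNodePoolOSDisk() constructors are assumed from the generator's usual pattern (AzureVMImage() appears later in this patch), and all literal values are placeholders, not defaults.

package applyexamples

import (
	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// azureNodePoolExample builds an Azure node-pool platform apply configuration
// using the new image and OS-disk sub-builders. Constructor names follow the
// generator's usual pattern; values are illustrative.
func azureNodePoolExample() *applyv1beta1.AzureNodePoolPlatformApplyConfiguration {
	return applyv1beta1.AzureNodePoolPlatform().
		WithVMSize("Standard_D4s_v4").
		WithImage(applyv1beta1.AzureVMImage().
			WithImageID("/subscriptions/.../images/rhcos")).
		WithOSDisk(applyv1beta1.AzureNodePoolOSDisk().
			WithEncryptionSetID("/subscriptions/.../diskEncryptionSets/example"))
}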
+func (b *AzurePlatformSpecApplyConfiguration) WithManagedIdentities(value *AzureResourceManagedIdentitiesApplyConfiguration) *AzurePlatformSpecApplyConfiguration { + b.ManagedIdentities = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go new file mode 100644 index 000000000..30e89a517 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go @@ -0,0 +1,38 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AzureResourceManagedIdentitiesApplyConfiguration represents an declarative configuration of the AzureResourceManagedIdentities type for use +// with apply. +type AzureResourceManagedIdentitiesApplyConfiguration struct { + ControlPlane *ControlPlaneManagedIdentitiesApplyConfiguration `json:"controlPlane,omitempty"` +} + +// AzureResourceManagedIdentitiesApplyConfiguration constructs an declarative configuration of the AzureResourceManagedIdentities type for use with +// apply. +func AzureResourceManagedIdentities() *AzureResourceManagedIdentitiesApplyConfiguration { + return &AzureResourceManagedIdentitiesApplyConfiguration{} +} + +// WithControlPlane sets the ControlPlane field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ControlPlane field is set to the value of the last call. +func (b *AzureResourceManagedIdentitiesApplyConfiguration) WithControlPlane(value *ControlPlaneManagedIdentitiesApplyConfiguration) *AzureResourceManagedIdentitiesApplyConfiguration { + b.ControlPlane = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go new file mode 100644 index 000000000..3cbf2cfaf --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go @@ -0,0 +1,60 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// AzureVMImageApplyConfiguration represents an declarative configuration of the AzureVMImage type for use +// with apply. +type AzureVMImageApplyConfiguration struct { + Type *v1beta1.AzureVMImageType `json:"type,omitempty"` + ImageID *string `json:"imageID,omitempty"` + AzureMarketplace *AzureMarketplaceImageApplyConfiguration `json:"azureMarketplace,omitempty"` +} + +// AzureVMImageApplyConfiguration constructs an declarative configuration of the AzureVMImage type for use with +// apply. +func AzureVMImage() *AzureVMImageApplyConfiguration { + return &AzureVMImageApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *AzureVMImageApplyConfiguration) WithType(value v1beta1.AzureVMImageType) *AzureVMImageApplyConfiguration { + b.Type = &value + return b +} + +// WithImageID sets the ImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageID field is set to the value of the last call. +func (b *AzureVMImageApplyConfiguration) WithImageID(value string) *AzureVMImageApplyConfiguration { + b.ImageID = &value + return b +} + +// WithAzureMarketplace sets the AzureMarketplace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AzureMarketplace field is set to the value of the last call. +func (b *AzureVMImageApplyConfiguration) WithAzureMarketplace(value *AzureMarketplaceImageApplyConfiguration) *AzureVMImageApplyConfiguration { + b.AzureMarketplace = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go new file mode 100644 index 000000000..421ca49b4 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go @@ -0,0 +1,110 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ControlPlaneManagedIdentitiesApplyConfiguration represents an declarative configuration of the ControlPlaneManagedIdentities type for use +// with apply. 
+type ControlPlaneManagedIdentitiesApplyConfiguration struct { + ManagedIdentitiesKeyVault *ManagedAzureKeyVaultApplyConfiguration `json:"managedIdentitiesKeyVault,omitempty"` + CloudProvider *ManagedIdentityApplyConfiguration `json:"cloudProvider,omitempty"` + NodePoolManagement *ManagedIdentityApplyConfiguration `json:"nodePoolManagement,omitempty"` + ControlPlaneOperator *ManagedIdentityApplyConfiguration `json:"controlPlaneOperator,omitempty"` + ImageRegistry *ManagedIdentityApplyConfiguration `json:"imageRegistry,omitempty"` + Ingress *ManagedIdentityApplyConfiguration `json:"ingress,omitempty"` + Network *ManagedIdentityApplyConfiguration `json:"network,omitempty"` + Disk *ManagedIdentityApplyConfiguration `json:"disk,omitempty"` + File *ManagedIdentityApplyConfiguration `json:"file,omitempty"` +} + +// ControlPlaneManagedIdentitiesApplyConfiguration constructs an declarative configuration of the ControlPlaneManagedIdentities type for use with +// apply. +func ControlPlaneManagedIdentities() *ControlPlaneManagedIdentitiesApplyConfiguration { + return &ControlPlaneManagedIdentitiesApplyConfiguration{} +} + +// WithManagedIdentitiesKeyVault sets the ManagedIdentitiesKeyVault field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedIdentitiesKeyVault field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithManagedIdentitiesKeyVault(value *ManagedAzureKeyVaultApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.ManagedIdentitiesKeyVault = value + return b +} + +// WithCloudProvider sets the CloudProvider field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudProvider field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithCloudProvider(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.CloudProvider = value + return b +} + +// WithNodePoolManagement sets the NodePoolManagement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodePoolManagement field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithNodePoolManagement(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.NodePoolManagement = value + return b +} + +// WithControlPlaneOperator sets the ControlPlaneOperator field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ControlPlaneOperator field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithControlPlaneOperator(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.ControlPlaneOperator = value + return b +} + +// WithImageRegistry sets the ImageRegistry field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ImageRegistry field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithImageRegistry(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.ImageRegistry = value + return b +} + +// WithIngress sets the Ingress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ingress field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithIngress(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.Ingress = value + return b +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithNetwork(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.Network = value + return b +} + +// WithDisk sets the Disk field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Disk field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithDisk(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.Disk = value + return b +} + +// WithFile sets the File field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the File field is set to the value of the last call. +func (b *ControlPlaneManagedIdentitiesApplyConfiguration) WithFile(value *ManagedIdentityApplyConfiguration) *ControlPlaneManagedIdentitiesApplyConfiguration { + b.File = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/diagnostics.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/diagnostics.go index fb80a413d..d7c6a9428 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/diagnostics.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/diagnostics.go @@ -17,11 +17,15 @@ limitations under the License. package v1beta1 +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + // DiagnosticsApplyConfiguration represents an declarative configuration of the Diagnostics type for use // with apply. 
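A hedged sketch of how the new managed-identity builders compose on the Azure platform spec. The AzurePlatformSpec() constructor is assumed from the generator's pattern; ManagedAzureKeyVault() and ManagedIdentity() appear later in this patch. IDs and names are placeholders.

package applyexamples

import (
	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// azureManagedIdentitiesExample wires a control-plane managed identity and its
// key vault into the Azure platform spec via the new ManagedIdentities field.
func azureManagedIdentitiesExample() *applyv1beta1.AzurePlatformSpecApplyConfiguration {
	return applyv1beta1.AzurePlatformSpec().
		WithManagedIdentities(applyv1beta1.AzureResourceManagedIdentities().
			WithControlPlane(applyv1beta1.ControlPlaneManagedIdentities().
				WithManagedIdentitiesKeyVault(applyv1beta1.ManagedAzureKeyVault().
					WithName("example-kv").
					WithTenantID("00000000-0000-0000-0000-000000000000")).
				WithCloudProvider(applyv1beta1.ManagedIdentity().
					WithClientID("11111111-1111-1111-1111-111111111111").
					WithCertificateName("cloud-provider-cert"))))
}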
type DiagnosticsApplyConfiguration struct { - StorageAccountType *string `json:"storageAccountType,omitempty"` - StorageAccountURI *string `json:"storageAccountURI,omitempty"` + StorageAccountType *v1beta1.AzureDiagnosticsStorageAccountType `json:"storageAccountType,omitempty"` + UserManaged *UserManagedDiagnosticsApplyConfiguration `json:"userManaged,omitempty"` } // DiagnosticsApplyConfiguration constructs an declarative configuration of the Diagnostics type for use with @@ -33,15 +37,15 @@ func Diagnostics() *DiagnosticsApplyConfiguration { // WithStorageAccountType sets the StorageAccountType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StorageAccountType field is set to the value of the last call. -func (b *DiagnosticsApplyConfiguration) WithStorageAccountType(value string) *DiagnosticsApplyConfiguration { +func (b *DiagnosticsApplyConfiguration) WithStorageAccountType(value v1beta1.AzureDiagnosticsStorageAccountType) *DiagnosticsApplyConfiguration { b.StorageAccountType = &value return b } -// WithStorageAccountURI sets the StorageAccountURI field in the declarative configuration to the given value +// WithUserManaged sets the UserManaged field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StorageAccountURI field is set to the value of the last call. -func (b *DiagnosticsApplyConfiguration) WithStorageAccountURI(value string) *DiagnosticsApplyConfiguration { - b.StorageAccountURI = &value +// If called multiple times, the UserManaged field is set to the value of the last call. +func (b *DiagnosticsApplyConfiguration) WithUserManaged(value *UserManagedDiagnosticsApplyConfiguration) *DiagnosticsApplyConfiguration { + b.UserManaged = value return b } diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/filterbyneutrontags.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/filterbyneutrontags.go new file mode 100644 index 000000000..e762cccb2 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/filterbyneutrontags.go @@ -0,0 +1,77 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// FilterByNeutronTagsApplyConfiguration represents an declarative configuration of the FilterByNeutronTags type for use +// with apply. 
+type FilterByNeutronTagsApplyConfiguration struct { + Tags []v1beta1.NeutronTag `json:"tags,omitempty"` + TagsAny []v1beta1.NeutronTag `json:"tagsAny,omitempty"` + NotTags []v1beta1.NeutronTag `json:"notTags,omitempty"` + NotTagsAny []v1beta1.NeutronTag `json:"notTagsAny,omitempty"` +} + +// FilterByNeutronTagsApplyConfiguration constructs an declarative configuration of the FilterByNeutronTags type for use with +// apply. +func FilterByNeutronTags() *FilterByNeutronTagsApplyConfiguration { + return &FilterByNeutronTagsApplyConfiguration{} +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *FilterByNeutronTagsApplyConfiguration) WithTags(values ...v1beta1.NeutronTag) *FilterByNeutronTagsApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} + +// WithTagsAny adds the given value to the TagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TagsAny field. +func (b *FilterByNeutronTagsApplyConfiguration) WithTagsAny(values ...v1beta1.NeutronTag) *FilterByNeutronTagsApplyConfiguration { + for i := range values { + b.TagsAny = append(b.TagsAny, values[i]) + } + return b +} + +// WithNotTags adds the given value to the NotTags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTags field. +func (b *FilterByNeutronTagsApplyConfiguration) WithNotTags(values ...v1beta1.NeutronTag) *FilterByNeutronTagsApplyConfiguration { + for i := range values { + b.NotTags = append(b.NotTags, values[i]) + } + return b +} + +// WithNotTagsAny adds the given value to the NotTagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTagsAny field. 
+func (b *FilterByNeutronTagsApplyConfiguration) WithNotTagsAny(values ...v1beta1.NeutronTag) *FilterByNeutronTagsApplyConfiguration { + for i := range values { + b.NotTagsAny = append(b.NotTagsAny, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go index b6e89b340..fd6e58a5a 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go @@ -53,6 +53,7 @@ type HostedClusterSpecApplyConfiguration struct { PausedUntil *string `json:"pausedUntil,omitempty"` OLMCatalogPlacement *hypershiftv1beta1.OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // HostedClusterSpecApplyConfiguration constructs an declarative configuration of the HostedClusterSpec type for use with @@ -292,3 +293,13 @@ func (b *HostedClusterSpecApplyConfiguration) WithNodeSelector(entries map[strin } return b } + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *HostedClusterSpecApplyConfiguration) WithTolerations(values ...corev1.Toleration) *HostedClusterSpecApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go index c0b9d5931..433804746 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go @@ -18,6 +18,7 @@ limitations under the License. package v1beta1 import ( + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) @@ -32,6 +33,7 @@ type HostedClusterStatusApplyConfiguration struct { ControlPlaneEndpoint *APIEndpointApplyConfiguration `json:"controlPlaneEndpoint,omitempty"` OAuthCallbackURLTemplate *string `json:"oauthCallbackURLTemplate,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + PayloadArch *hypershiftv1beta1.PayloadArchType `json:"payloadArch,omitempty"` Platform *PlatformStatusApplyConfiguration `json:"platform,omitempty"` } @@ -102,6 +104,14 @@ func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...*metav1 return b } +// WithPayloadArch sets the PayloadArch field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PayloadArch field is set to the value of the last call. 
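The new Tolerations field on the hosted cluster spec takes plain corev1.Toleration values. A small sketch, assuming the pre-existing HostedClusterSpec() constructor; the taint key is illustrative.

package applyexamples

import (
	corev1 "k8s.io/api/core/v1"

	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// hostedClusterTolerationsExample appends a toleration to the hosted cluster
// spec apply configuration; repeated calls append further tolerations.
func hostedClusterTolerationsExample() *applyv1beta1.HostedClusterSpecApplyConfiguration {
	return applyv1beta1.HostedClusterSpec().
		WithTolerations(corev1.Toleration{
			// Illustrative taint key; use whatever taint the management cluster applies.
			Key:      "hypershift.example.com/control-plane",
			Operator: corev1.TolerationOpExists,
			Effect:   corev1.TaintEffectNoSchedule,
		})
}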
+func (b *HostedClusterStatusApplyConfiguration) WithPayloadArch(value hypershiftv1beta1.PayloadArchType) *HostedClusterStatusApplyConfiguration { + b.PayloadArch = &value + return b +} + // WithPlatform sets the Platform field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Platform field is set to the value of the last call. diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go index 3fd545463..f541e163e 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go @@ -54,6 +54,7 @@ type HostedControlPlaneSpecApplyConfiguration struct { OLMCatalogPlacement *hypershiftv1beta1.OLMCatalogPlacement `json:"olmCatalogPlacement,omitempty"` Autoscaling *ClusterAutoscalingApplyConfiguration `json:"autoscaling,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // HostedControlPlaneSpecApplyConfiguration constructs an declarative configuration of the HostedControlPlaneSpec type for use with @@ -301,3 +302,13 @@ func (b *HostedControlPlaneSpecApplyConfiguration) WithNodeSelector(entries map[ } return b } + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *HostedControlPlaneSpecApplyConfiguration) WithTolerations(values ...corev1.Toleration) *HostedControlPlaneSpecApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirthostdevice.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirthostdevice.go new file mode 100644 index 000000000..f7d40e873 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirthostdevice.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// KubevirtHostDeviceApplyConfiguration represents an declarative configuration of the KubevirtHostDevice type for use +// with apply. 
+type KubevirtHostDeviceApplyConfiguration struct { + DeviceName *string `json:"deviceName,omitempty"` + Count *int `json:"count,omitempty"` +} + +// KubevirtHostDeviceApplyConfiguration constructs an declarative configuration of the KubevirtHostDevice type for use with +// apply. +func KubevirtHostDevice() *KubevirtHostDeviceApplyConfiguration { + return &KubevirtHostDeviceApplyConfiguration{} +} + +// WithDeviceName sets the DeviceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceName field is set to the value of the last call. +func (b *KubevirtHostDeviceApplyConfiguration) WithDeviceName(value string) *KubevirtHostDeviceApplyConfiguration { + b.DeviceName = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. +func (b *KubevirtHostDeviceApplyConfiguration) WithCount(value int) *KubevirtHostDeviceApplyConfiguration { + b.Count = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go index 421cac736..88d667927 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go @@ -24,12 +24,13 @@ import ( // KubevirtNodePoolPlatformApplyConfiguration represents an declarative configuration of the KubevirtNodePoolPlatform type for use // with apply. type KubevirtNodePoolPlatformApplyConfiguration struct { - RootVolume *KubevirtRootVolumeApplyConfiguration `json:"rootVolume,omitempty"` - Compute *KubevirtComputeApplyConfiguration `json:"compute,omitempty"` - NetworkInterfaceMultiQueue *hypershiftv1beta1.MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` - AdditionalNetworks []KubevirtNetworkApplyConfiguration `json:"additionalNetworks,omitempty"` - AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + RootVolume *KubevirtRootVolumeApplyConfiguration `json:"rootVolume,omitempty"` + Compute *KubevirtComputeApplyConfiguration `json:"compute,omitempty"` + NetworkInterfaceMultiQueue *hypershiftv1beta1.MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` + AdditionalNetworks []KubevirtNetworkApplyConfiguration `json:"additionalNetworks,omitempty"` + AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + KubevirtHostDevices []KubevirtHostDeviceApplyConfiguration `json:"hostDevices,omitempty"` } // KubevirtNodePoolPlatformApplyConfiguration constructs an declarative configuration of the KubevirtNodePoolPlatform type for use with @@ -96,3 +97,16 @@ func (b *KubevirtNodePoolPlatformApplyConfiguration) WithNodeSelector(entries ma } return b } + +// WithKubevirtHostDevices adds the given value to the KubevirtHostDevices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the KubevirtHostDevices field. +func (b *KubevirtNodePoolPlatformApplyConfiguration) WithKubevirtHostDevices(values ...*KubevirtHostDeviceApplyConfiguration) *KubevirtNodePoolPlatformApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithKubevirtHostDevices") + } + b.KubevirtHostDevices = append(b.KubevirtHostDevices, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubeconfigsecretref.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedazurekeyvault.go similarity index 51% rename from vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubeconfigsecretref.go rename to vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedazurekeyvault.go index 607f90f3b..754573473 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/kubeconfigsecretref.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedazurekeyvault.go @@ -15,33 +15,33 @@ limitations under the License. */ // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 -// KubeconfigSecretRefApplyConfiguration represents an declarative configuration of the KubeconfigSecretRef type for use +// ManagedAzureKeyVaultApplyConfiguration represents an declarative configuration of the ManagedAzureKeyVault type for use // with apply. -type KubeconfigSecretRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Key *string `json:"key,omitempty"` +type ManagedAzureKeyVaultApplyConfiguration struct { + Name *string `json:"name,omitempty"` + TenantID *string `json:"tenantID,omitempty"` } -// KubeconfigSecretRefApplyConfiguration constructs an declarative configuration of the KubeconfigSecretRef type for use with +// ManagedAzureKeyVaultApplyConfiguration constructs an declarative configuration of the ManagedAzureKeyVault type for use with // apply. -func KubeconfigSecretRef() *KubeconfigSecretRefApplyConfiguration { - return &KubeconfigSecretRefApplyConfiguration{} +func ManagedAzureKeyVault() *ManagedAzureKeyVaultApplyConfiguration { + return &ManagedAzureKeyVaultApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *KubeconfigSecretRefApplyConfiguration) WithName(value string) *KubeconfigSecretRefApplyConfiguration { +func (b *ManagedAzureKeyVaultApplyConfiguration) WithName(value string) *ManagedAzureKeyVaultApplyConfiguration { b.Name = &value return b } -// WithKey sets the Key field in the declarative configuration to the given value +// WithTenantID sets the TenantID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Key field is set to the value of the last call. -func (b *KubeconfigSecretRefApplyConfiguration) WithKey(value string) *KubeconfigSecretRefApplyConfiguration { - b.Key = &value +// If called multiple times, the TenantID field is set to the value of the last call. 
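WithKubevirtHostDevices takes pointers to the per-device builder and dereferences them (nil values panic). A sketch assuming the KubevirtNodePoolPlatform() constructor from the generator's pattern and an illustrative device name.

package applyexamples

import (
	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// kubevirtHostDevicesExample attaches one host device to a KubeVirt node-pool
// platform apply configuration.
func kubevirtHostDevicesExample() *applyv1beta1.KubevirtNodePoolPlatformApplyConfiguration {
	return applyv1beta1.KubevirtNodePoolPlatform().
		WithKubevirtHostDevices(applyv1beta1.KubevirtHostDevice().
			// Illustrative host device resource name.
			WithDeviceName("nvidia.com/TU104GL_Tesla_T4").
			WithCount(1))
}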
+func (b *ManagedAzureKeyVaultApplyConfiguration) WithTenantID(value string) *ManagedAzureKeyVaultApplyConfiguration { + b.TenantID = &value return b } diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedidentity.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedidentity.go new file mode 100644 index 000000000..a77bcf4b3 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/managedidentity.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ManagedIdentityApplyConfiguration represents an declarative configuration of the ManagedIdentity type for use +// with apply. +type ManagedIdentityApplyConfiguration struct { + ClientID *string `json:"clientID,omitempty"` + CertificateName *string `json:"certificateName,omitempty"` +} + +// ManagedIdentityApplyConfiguration constructs an declarative configuration of the ManagedIdentity type for use with +// apply. +func ManagedIdentity() *ManagedIdentityApplyConfiguration { + return &ManagedIdentityApplyConfiguration{} +} + +// WithClientID sets the ClientID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClientID field is set to the value of the last call. +func (b *ManagedIdentityApplyConfiguration) WithClientID(value string) *ManagedIdentityApplyConfiguration { + b.ClientID = &value + return b +} + +// WithCertificateName sets the CertificateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CertificateName field is set to the value of the last call. +func (b *ManagedIdentityApplyConfiguration) WithCertificateName(value string) *ManagedIdentityApplyConfiguration { + b.CertificateName = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkfilter.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkfilter.go new file mode 100644 index 000000000..6b9b3679d --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkfilter.go @@ -0,0 +1,101 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// NetworkFilterApplyConfiguration represents an declarative configuration of the NetworkFilter type for use +// with apply. +type NetworkFilterApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + ProjectID *string `json:"projectID,omitempty"` + FilterByNeutronTagsApplyConfiguration `json:",inline"` +} + +// NetworkFilterApplyConfiguration constructs an declarative configuration of the NetworkFilter type for use with +// apply. +func NetworkFilter() *NetworkFilterApplyConfiguration { + return &NetworkFilterApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NetworkFilterApplyConfiguration) WithName(value string) *NetworkFilterApplyConfiguration { + b.Name = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *NetworkFilterApplyConfiguration) WithDescription(value string) *NetworkFilterApplyConfiguration { + b.Description = &value + return b +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. +func (b *NetworkFilterApplyConfiguration) WithProjectID(value string) *NetworkFilterApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *NetworkFilterApplyConfiguration) WithTags(values ...hypershiftv1beta1.NeutronTag) *NetworkFilterApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} + +// WithTagsAny adds the given value to the TagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TagsAny field. +func (b *NetworkFilterApplyConfiguration) WithTagsAny(values ...hypershiftv1beta1.NeutronTag) *NetworkFilterApplyConfiguration { + for i := range values { + b.TagsAny = append(b.TagsAny, values[i]) + } + return b +} + +// WithNotTags adds the given value to the NotTags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTags field. 
+func (b *NetworkFilterApplyConfiguration) WithNotTags(values ...hypershiftv1beta1.NeutronTag) *NetworkFilterApplyConfiguration { + for i := range values { + b.NotTags = append(b.NotTags, values[i]) + } + return b +} + +// WithNotTagsAny adds the given value to the NotTagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTagsAny field. +func (b *NetworkFilterApplyConfiguration) WithNotTagsAny(values ...hypershiftv1beta1.NeutronTag) *NetworkFilterApplyConfiguration { + for i := range values { + b.NotTagsAny = append(b.NotTagsAny, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsresourcereference.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkparam.go similarity index 51% rename from vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsresourcereference.go rename to vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkparam.go index 7e3a4ff36..d859ec4cb 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/powervsresourcereference.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/networkparam.go @@ -15,33 +15,33 @@ limitations under the License. */ // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 -// PowerVSResourceReferenceApplyConfiguration represents an declarative configuration of the PowerVSResourceReference type for use +// NetworkParamApplyConfiguration represents an declarative configuration of the NetworkParam type for use // with apply. -type PowerVSResourceReferenceApplyConfiguration struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` +type NetworkParamApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Filter *NetworkFilterApplyConfiguration `json:"filter,omitempty"` } -// PowerVSResourceReferenceApplyConfiguration constructs an declarative configuration of the PowerVSResourceReference type for use with +// NetworkParamApplyConfiguration constructs an declarative configuration of the NetworkParam type for use with // apply. -func PowerVSResourceReference() *PowerVSResourceReferenceApplyConfiguration { - return &PowerVSResourceReferenceApplyConfiguration{} +func NetworkParam() *NetworkParamApplyConfiguration { + return &NetworkParamApplyConfiguration{} } // WithID sets the ID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ID field is set to the value of the last call. -func (b *PowerVSResourceReferenceApplyConfiguration) WithID(value string) *PowerVSResourceReferenceApplyConfiguration { +func (b *NetworkParamApplyConfiguration) WithID(value string) *NetworkParamApplyConfiguration { b.ID = &value return b } -// WithName sets the Name field in the declarative configuration to the given value +// WithFilter sets the Filter field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *PowerVSResourceReferenceApplyConfiguration) WithName(value string) *PowerVSResourceReferenceApplyConfiguration { - b.Name = &value +// If called multiple times, the Filter field is set to the value of the last call. +func (b *NetworkParamApplyConfiguration) WithFilter(value *NetworkFilterApplyConfiguration) *NetworkParamApplyConfiguration { + b.Filter = value return b } diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go index 79393c74a..a6bec424f 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go @@ -24,13 +24,14 @@ import ( // NodePoolPlatformApplyConfiguration represents an declarative configuration of the NodePoolPlatform type for use // with apply. type NodePoolPlatformApplyConfiguration struct { - Type *v1beta1.PlatformType `json:"type,omitempty"` - AWS *AWSNodePoolPlatformApplyConfiguration `json:"aws,omitempty"` - IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` - Kubevirt *KubevirtNodePoolPlatformApplyConfiguration `json:"kubevirt,omitempty"` - Agent *AgentNodePoolPlatformApplyConfiguration `json:"agent,omitempty"` - Azure *AzureNodePoolPlatformApplyConfiguration `json:"azure,omitempty"` - PowerVS *PowerVSNodePoolPlatformApplyConfiguration `json:"powervs,omitempty"` + Type *v1beta1.PlatformType `json:"type,omitempty"` + AWS *AWSNodePoolPlatformApplyConfiguration `json:"aws,omitempty"` + IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` + Kubevirt *KubevirtNodePoolPlatformApplyConfiguration `json:"kubevirt,omitempty"` + Agent *AgentNodePoolPlatformApplyConfiguration `json:"agent,omitempty"` + Azure *AzureNodePoolPlatformApplyConfiguration `json:"azure,omitempty"` + PowerVS *PowerVSNodePoolPlatformApplyConfiguration `json:"powervs,omitempty"` + OpenStack *OpenStackNodePoolPlatformApplyConfiguration `json:"openstack,omitempty"` } // NodePoolPlatformApplyConfiguration constructs an declarative configuration of the NodePoolPlatform type for use with @@ -94,3 +95,11 @@ func (b *NodePoolPlatformApplyConfiguration) WithPowerVS(value *PowerVSNodePoolP b.PowerVS = value return b } + +// WithOpenStack sets the OpenStack field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenStack field is set to the value of the last call. +func (b *NodePoolPlatformApplyConfiguration) WithOpenStack(value *OpenStackNodePoolPlatformApplyConfiguration) *NodePoolPlatformApplyConfiguration { + b.OpenStack = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackidentityreference.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackidentityreference.go new file mode 100644 index 000000000..5b74ea2ea --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackidentityreference.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
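An OpenStack network can be selected either by ID or by a Neutron filter. A sketch of the filter path, assuming NeutronTag has a string underlying type (so the conversion below compiles); the names and tag are illustrative.

package applyexamples

import (
	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"

	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// openstackNetworkExample selects a network by name and Neutron tag instead of
// by ID, using the new NetworkFilter sub-builder.
func openstackNetworkExample() *applyv1beta1.NetworkParamApplyConfiguration {
	return applyv1beta1.NetworkParam().
		WithFilter(applyv1beta1.NetworkFilter().
			// Illustrative network name and tag; NeutronTag is assumed to be a string type.
			WithName("hypershift-nodes").
			WithTags(hypershiftv1beta1.NeutronTag("hypershift")))
}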
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// OpenStackIdentityReferenceApplyConfiguration represents an declarative configuration of the OpenStackIdentityReference type for use +// with apply. +type OpenStackIdentityReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + CloudName *string `json:"cloudName,omitempty"` +} + +// OpenStackIdentityReferenceApplyConfiguration constructs an declarative configuration of the OpenStackIdentityReference type for use with +// apply. +func OpenStackIdentityReference() *OpenStackIdentityReferenceApplyConfiguration { + return &OpenStackIdentityReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OpenStackIdentityReferenceApplyConfiguration) WithName(value string) *OpenStackIdentityReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithCloudName sets the CloudName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudName field is set to the value of the last call. +func (b *OpenStackIdentityReferenceApplyConfiguration) WithCloudName(value string) *OpenStackIdentityReferenceApplyConfiguration { + b.CloudName = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go new file mode 100644 index 000000000..0b8c186d7 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go @@ -0,0 +1,56 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// OpenStackNodePoolPlatformApplyConfiguration represents an declarative configuration of the OpenStackNodePoolPlatform type for use +// with apply. +type OpenStackNodePoolPlatformApplyConfiguration struct { + Flavor *string `json:"flavor,omitempty"` + ImageName *string `json:"imageName,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` +} + +// OpenStackNodePoolPlatformApplyConfiguration constructs an declarative configuration of the OpenStackNodePoolPlatform type for use with +// apply. 
+func OpenStackNodePoolPlatform() *OpenStackNodePoolPlatformApplyConfiguration { + return &OpenStackNodePoolPlatformApplyConfiguration{} +} + +// WithFlavor sets the Flavor field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Flavor field is set to the value of the last call. +func (b *OpenStackNodePoolPlatformApplyConfiguration) WithFlavor(value string) *OpenStackNodePoolPlatformApplyConfiguration { + b.Flavor = &value + return b +} + +// WithImageName sets the ImageName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageName field is set to the value of the last call. +func (b *OpenStackNodePoolPlatformApplyConfiguration) WithImageName(value string) *OpenStackNodePoolPlatformApplyConfiguration { + b.ImageName = &value + return b +} + +// WithAvailabilityZone sets the AvailabilityZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailabilityZone field is set to the value of the last call. +func (b *OpenStackNodePoolPlatformApplyConfiguration) WithAvailabilityZone(value string) *OpenStackNodePoolPlatformApplyConfiguration { + b.AvailabilityZone = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go new file mode 100644 index 000000000..070a3e490 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go @@ -0,0 +1,122 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// OpenStackPlatformSpecApplyConfiguration represents an declarative configuration of the OpenStackPlatformSpec type for use +// with apply. +type OpenStackPlatformSpecApplyConfiguration struct { + IdentityRef *OpenStackIdentityReferenceApplyConfiguration `json:"identityRef,omitempty"` + ManagedSubnets []SubnetSpecApplyConfiguration `json:"managedSubnets,omitempty"` + Router *RouterParamApplyConfiguration `json:"router,omitempty"` + Network *NetworkParamApplyConfiguration `json:"network,omitempty"` + Subnets []SubnetParamApplyConfiguration `json:"subnets,omitempty"` + NetworkMTU *int `json:"networkMTU,omitempty"` + ExternalNetwork *NetworkParamApplyConfiguration `json:"externalNetwork,omitempty"` + DisableExternalNetwork *bool `json:"disableExternalNetwork,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// OpenStackPlatformSpecApplyConfiguration constructs an declarative configuration of the OpenStackPlatformSpec type for use with +// apply. 
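For node pools, the new OpenStack builder plugs into NodePoolPlatform through WithOpenStack. A sketch assuming the NodePoolPlatform() constructor and an OpenStack PlatformType constant in the hypershift API; flavor and image name are illustrative.

package applyexamples

import (
	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"

	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
)

// openstackNodePoolExample sets the platform type and the OpenStack-specific
// node-pool fields on a NodePoolPlatform apply configuration.
func openstackNodePoolExample() *applyv1beta1.NodePoolPlatformApplyConfiguration {
	return applyv1beta1.NodePoolPlatform().
		// Assumed PlatformType constant for OpenStack in the hypershift API.
		WithType(hypershiftv1beta1.OpenStackPlatform).
		WithOpenStack(applyv1beta1.OpenStackNodePoolPlatform().
			WithFlavor("m1.xlarge").
			WithImageName("rhcos-openstack"))
}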
+func OpenStackPlatformSpec() *OpenStackPlatformSpecApplyConfiguration { + return &OpenStackPlatformSpecApplyConfiguration{} +} + +// WithIdentityRef sets the IdentityRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IdentityRef field is set to the value of the last call. +func (b *OpenStackPlatformSpecApplyConfiguration) WithIdentityRef(value *OpenStackIdentityReferenceApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + b.IdentityRef = value + return b +} + +// WithManagedSubnets adds the given value to the ManagedSubnets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ManagedSubnets field. +func (b *OpenStackPlatformSpecApplyConfiguration) WithManagedSubnets(values ...*SubnetSpecApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithManagedSubnets") + } + b.ManagedSubnets = append(b.ManagedSubnets, *values[i]) + } + return b +} + +// WithRouter sets the Router field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Router field is set to the value of the last call. +func (b *OpenStackPlatformSpecApplyConfiguration) WithRouter(value *RouterParamApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + b.Router = value + return b +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. +func (b *OpenStackPlatformSpecApplyConfiguration) WithNetwork(value *NetworkParamApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + b.Network = value + return b +} + +// WithSubnets adds the given value to the Subnets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subnets field. +func (b *OpenStackPlatformSpecApplyConfiguration) WithSubnets(values ...*SubnetParamApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubnets") + } + b.Subnets = append(b.Subnets, *values[i]) + } + return b +} + +// WithNetworkMTU sets the NetworkMTU field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkMTU field is set to the value of the last call. +func (b *OpenStackPlatformSpecApplyConfiguration) WithNetworkMTU(value int) *OpenStackPlatformSpecApplyConfiguration { + b.NetworkMTU = &value + return b +} + +// WithExternalNetwork sets the ExternalNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalNetwork field is set to the value of the last call. 
+func (b *OpenStackPlatformSpecApplyConfiguration) WithExternalNetwork(value *NetworkParamApplyConfiguration) *OpenStackPlatformSpecApplyConfiguration { + b.ExternalNetwork = value + return b +} + +// WithDisableExternalNetwork sets the DisableExternalNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DisableExternalNetwork field is set to the value of the last call. +func (b *OpenStackPlatformSpecApplyConfiguration) WithDisableExternalNetwork(value bool) *OpenStackPlatformSpecApplyConfiguration { + b.DisableExternalNetwork = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *OpenStackPlatformSpecApplyConfiguration) WithTags(values ...string) *OpenStackPlatformSpecApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/placementoptions.go similarity index 50% rename from vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdspec.go rename to vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/placementoptions.go index 25f514d0a..15f683975 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1/managedetcdspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/placementoptions.go @@ -15,24 +15,24 @@ limitations under the License. */ // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 -// ManagedEtcdSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdSpec type for use +// PlacementOptionsApplyConfiguration represents an declarative configuration of the PlacementOptions type for use // with apply. -type ManagedEtcdSpecApplyConfiguration struct { - Storage *ManagedEtcdStorageSpecApplyConfiguration `json:"storage,omitempty"` +type PlacementOptionsApplyConfiguration struct { + Tenancy *string `json:"tenancy,omitempty"` } -// ManagedEtcdSpecApplyConfiguration constructs an declarative configuration of the ManagedEtcdSpec type for use with +// PlacementOptionsApplyConfiguration constructs an declarative configuration of the PlacementOptions type for use with // apply. -func ManagedEtcdSpec() *ManagedEtcdSpecApplyConfiguration { - return &ManagedEtcdSpecApplyConfiguration{} +func PlacementOptions() *PlacementOptionsApplyConfiguration { + return &PlacementOptionsApplyConfiguration{} } -// WithStorage sets the Storage field in the declarative configuration to the given value +// WithTenancy sets the Tenancy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Storage field is set to the value of the last call. 
-func (b *ManagedEtcdSpecApplyConfiguration) WithStorage(value *ManagedEtcdStorageSpecApplyConfiguration) *ManagedEtcdSpecApplyConfiguration { - b.Storage = value +// If called multiple times, the Tenancy field is set to the value of the last call. +func (b *PlacementOptionsApplyConfiguration) WithTenancy(value string) *PlacementOptionsApplyConfiguration { + b.Tenancy = &value return b } diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/platformspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/platformspec.go index efc7f9d34..a5213aa0e 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/platformspec.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/platformspec.go @@ -24,13 +24,14 @@ import ( // PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use // with apply. type PlatformSpecApplyConfiguration struct { - Type *v1beta1.PlatformType `json:"type,omitempty"` - AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` - Agent *AgentPlatformSpecApplyConfiguration `json:"agent,omitempty"` - IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` - Azure *AzurePlatformSpecApplyConfiguration `json:"azure,omitempty"` - PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` - Kubevirt *KubevirtPlatformSpecApplyConfiguration `json:"kubevirt,omitempty"` + Type *v1beta1.PlatformType `json:"type,omitempty"` + AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` + Agent *AgentPlatformSpecApplyConfiguration `json:"agent,omitempty"` + IBMCloud *IBMCloudPlatformSpecApplyConfiguration `json:"ibmcloud,omitempty"` + Azure *AzurePlatformSpecApplyConfiguration `json:"azure,omitempty"` + PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` + Kubevirt *KubevirtPlatformSpecApplyConfiguration `json:"kubevirt,omitempty"` + OpenStack *OpenStackPlatformSpecApplyConfiguration `json:"openstack,omitempty"` } // PlatformSpecApplyConfiguration constructs an declarative configuration of the PlatformSpec type for use with @@ -94,3 +95,11 @@ func (b *PlatformSpecApplyConfiguration) WithKubevirt(value *KubevirtPlatformSpe b.Kubevirt = value return b } + +// WithOpenStack sets the OpenStack field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OpenStack field is set to the value of the last call. +func (b *PlatformSpecApplyConfiguration) WithOpenStack(value *OpenStackPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.OpenStack = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerfilter.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerfilter.go new file mode 100644 index 000000000..c3a52f277 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerfilter.go @@ -0,0 +1,101 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// RouterFilterApplyConfiguration represents an declarative configuration of the RouterFilter type for use +// with apply. +type RouterFilterApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + ProjectID *string `json:"projectID,omitempty"` + FilterByNeutronTagsApplyConfiguration `json:",inline"` +} + +// RouterFilterApplyConfiguration constructs an declarative configuration of the RouterFilter type for use with +// apply. +func RouterFilter() *RouterFilterApplyConfiguration { + return &RouterFilterApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouterFilterApplyConfiguration) WithName(value string) *RouterFilterApplyConfiguration { + b.Name = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *RouterFilterApplyConfiguration) WithDescription(value string) *RouterFilterApplyConfiguration { + b.Description = &value + return b +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. +func (b *RouterFilterApplyConfiguration) WithProjectID(value string) *RouterFilterApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *RouterFilterApplyConfiguration) WithTags(values ...hypershiftv1beta1.NeutronTag) *RouterFilterApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} + +// WithTagsAny adds the given value to the TagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TagsAny field. 
+func (b *RouterFilterApplyConfiguration) WithTagsAny(values ...hypershiftv1beta1.NeutronTag) *RouterFilterApplyConfiguration { + for i := range values { + b.TagsAny = append(b.TagsAny, values[i]) + } + return b +} + +// WithNotTags adds the given value to the NotTags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTags field. +func (b *RouterFilterApplyConfiguration) WithNotTags(values ...hypershiftv1beta1.NeutronTag) *RouterFilterApplyConfiguration { + for i := range values { + b.NotTags = append(b.NotTags, values[i]) + } + return b +} + +// WithNotTagsAny adds the given value to the NotTagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTagsAny field. +func (b *RouterFilterApplyConfiguration) WithNotTagsAny(values ...hypershiftv1beta1.NeutronTag) *RouterFilterApplyConfiguration { + for i := range values { + b.NotTagsAny = append(b.NotTagsAny, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerparam.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerparam.go new file mode 100644 index 000000000..1d3469c20 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/routerparam.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// RouterParamApplyConfiguration represents an declarative configuration of the RouterParam type for use +// with apply. +type RouterParamApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Filter *RouterFilterApplyConfiguration `json:"filter,omitempty"` +} + +// RouterParamApplyConfiguration constructs an declarative configuration of the RouterParam type for use with +// apply. +func RouterParam() *RouterParamApplyConfiguration { + return &RouterParamApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *RouterParamApplyConfiguration) WithID(value string) *RouterParamApplyConfiguration { + b.ID = &value + return b +} + +// WithFilter sets the Filter field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Filter field is set to the value of the last call. 
+func (b *RouterParamApplyConfiguration) WithFilter(value *RouterFilterApplyConfiguration) *RouterParamApplyConfiguration { + b.Filter = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go new file mode 100644 index 000000000..893dc3e32 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go @@ -0,0 +1,146 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +// SubnetFilterApplyConfiguration represents an declarative configuration of the SubnetFilter type for use +// with apply. +type SubnetFilterApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + ProjectID *string `json:"projectID,omitempty"` + IPVersion *int `json:"ipVersion,omitempty"` + GatewayIP *string `json:"gatewayIP,omitempty"` + CIDR *string `json:"cidr,omitempty"` + IPv6AddressMode *string `json:"ipv6AddressMode,omitempty"` + IPv6RAMode *string `json:"ipv6RAMode,omitempty"` + FilterByNeutronTagsApplyConfiguration `json:",inline"` +} + +// SubnetFilterApplyConfiguration constructs an declarative configuration of the SubnetFilter type for use with +// apply. +func SubnetFilter() *SubnetFilterApplyConfiguration { + return &SubnetFilterApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithName(value string) *SubnetFilterApplyConfiguration { + b.Name = &value + return b +} + +// WithDescription sets the Description field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Description field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithDescription(value string) *SubnetFilterApplyConfiguration { + b.Description = &value + return b +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. 
+func (b *SubnetFilterApplyConfiguration) WithProjectID(value string) *SubnetFilterApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithIPVersion sets the IPVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPVersion field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithIPVersion(value int) *SubnetFilterApplyConfiguration { + b.IPVersion = &value + return b +} + +// WithGatewayIP sets the GatewayIP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatewayIP field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithGatewayIP(value string) *SubnetFilterApplyConfiguration { + b.GatewayIP = &value + return b +} + +// WithCIDR sets the CIDR field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CIDR field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithCIDR(value string) *SubnetFilterApplyConfiguration { + b.CIDR = &value + return b +} + +// WithIPv6AddressMode sets the IPv6AddressMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6AddressMode field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithIPv6AddressMode(value string) *SubnetFilterApplyConfiguration { + b.IPv6AddressMode = &value + return b +} + +// WithIPv6RAMode sets the IPv6RAMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6RAMode field is set to the value of the last call. +func (b *SubnetFilterApplyConfiguration) WithIPv6RAMode(value string) *SubnetFilterApplyConfiguration { + b.IPv6RAMode = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *SubnetFilterApplyConfiguration) WithTags(values ...hypershiftv1beta1.NeutronTag) *SubnetFilterApplyConfiguration { + for i := range values { + b.Tags = append(b.Tags, values[i]) + } + return b +} + +// WithTagsAny adds the given value to the TagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TagsAny field. +func (b *SubnetFilterApplyConfiguration) WithTagsAny(values ...hypershiftv1beta1.NeutronTag) *SubnetFilterApplyConfiguration { + for i := range values { + b.TagsAny = append(b.TagsAny, values[i]) + } + return b +} + +// WithNotTags adds the given value to the NotTags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the NotTags field. +func (b *SubnetFilterApplyConfiguration) WithNotTags(values ...hypershiftv1beta1.NeutronTag) *SubnetFilterApplyConfiguration { + for i := range values { + b.NotTags = append(b.NotTags, values[i]) + } + return b +} + +// WithNotTagsAny adds the given value to the NotTagsAny field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NotTagsAny field. +func (b *SubnetFilterApplyConfiguration) WithNotTagsAny(values ...hypershiftv1beta1.NeutronTag) *SubnetFilterApplyConfiguration { + for i := range values { + b.NotTagsAny = append(b.NotTagsAny, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetparam.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetparam.go new file mode 100644 index 000000000..6754f748d --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetparam.go @@ -0,0 +1,47 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// SubnetParamApplyConfiguration represents an declarative configuration of the SubnetParam type for use +// with apply. +type SubnetParamApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Filter *SubnetFilterApplyConfiguration `json:"filter,omitempty"` +} + +// SubnetParamApplyConfiguration constructs an declarative configuration of the SubnetParam type for use with +// apply. +func SubnetParam() *SubnetParamApplyConfiguration { + return &SubnetParamApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *SubnetParamApplyConfiguration) WithID(value string) *SubnetParamApplyConfiguration { + b.ID = &value + return b +} + +// WithFilter sets the Filter field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Filter field is set to the value of the last call. 
+func (b *SubnetParamApplyConfiguration) WithFilter(value *SubnetFilterApplyConfiguration) *SubnetParamApplyConfiguration { + b.Filter = value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetspec.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetspec.go new file mode 100644 index 000000000..016c4cd98 --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/subnetspec.go @@ -0,0 +1,54 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// SubnetSpecApplyConfiguration represents an declarative configuration of the SubnetSpec type for use +// with apply. +type SubnetSpecApplyConfiguration struct { + DNSNameservers []string `json:"dnsNameservers,omitempty"` + AllocationPools []AllocationPoolApplyConfiguration `json:"allocationPools,omitempty"` +} + +// SubnetSpecApplyConfiguration constructs an declarative configuration of the SubnetSpec type for use with +// apply. +func SubnetSpec() *SubnetSpecApplyConfiguration { + return &SubnetSpecApplyConfiguration{} +} + +// WithDNSNameservers adds the given value to the DNSNameservers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DNSNameservers field. +func (b *SubnetSpecApplyConfiguration) WithDNSNameservers(values ...string) *SubnetSpecApplyConfiguration { + for i := range values { + b.DNSNameservers = append(b.DNSNameservers, values[i]) + } + return b +} + +// WithAllocationPools adds the given value to the AllocationPools field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllocationPools field. +func (b *SubnetSpecApplyConfiguration) WithAllocationPools(values ...*AllocationPoolApplyConfiguration) *SubnetSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllocationPools") + } + b.AllocationPools = append(b.AllocationPools, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/usermanageddiagnostics.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/usermanageddiagnostics.go new file mode 100644 index 000000000..ddf92eccb --- /dev/null +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1/usermanageddiagnostics.go @@ -0,0 +1,38 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// UserManagedDiagnosticsApplyConfiguration represents an declarative configuration of the UserManagedDiagnostics type for use +// with apply. +type UserManagedDiagnosticsApplyConfiguration struct { + StorageAccountURI *string `json:"storageAccountURI,omitempty"` +} + +// UserManagedDiagnosticsApplyConfiguration constructs an declarative configuration of the UserManagedDiagnostics type for use with +// apply. +func UserManagedDiagnostics() *UserManagedDiagnosticsApplyConfiguration { + return &UserManagedDiagnosticsApplyConfiguration{} +} + +// WithStorageAccountURI sets the StorageAccountURI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StorageAccountURI field is set to the value of the last call. +func (b *UserManagedDiagnosticsApplyConfiguration) WithStorageAccountURI(value string) *UserManagedDiagnosticsApplyConfiguration { + b.StorageAccountURI = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1/effects.go b/vendor/github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1/effects.go index 373aff818..ba77d9bda 100644 --- a/vendor/github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1/effects.go +++ b/vendor/github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1/effects.go @@ -25,12 +25,14 @@ import ( // EffectsApplyConfiguration represents an declarative configuration of the Effects type for use // with apply. 
type EffectsApplyConfiguration struct { - KASGoMemLimit *resource.Quantity `json:"kasGoMemLimit,omitempty"` - ControlPlanePriorityClassName *string `json:"controlPlanePriorityClassName,omitempty"` - EtcdPriorityClassName *string `json:"etcdPriorityClassName,omitempty"` - APICriticalPriorityClassName *string `json:"APICriticalPriorityClassName,omitempty"` - ResourceRequests []ResourceRequestApplyConfiguration `json:"resourceRequests,omitempty"` - MachineHealthCheckTimeout *v1.Duration `json:"machineHealthCheckTimeout,omitempty"` + KASGoMemLimit *resource.Quantity `json:"kasGoMemLimit,omitempty"` + ControlPlanePriorityClassName *string `json:"controlPlanePriorityClassName,omitempty"` + EtcdPriorityClassName *string `json:"etcdPriorityClassName,omitempty"` + APICriticalPriorityClassName *string `json:"APICriticalPriorityClassName,omitempty"` + ResourceRequests []ResourceRequestApplyConfiguration `json:"resourceRequests,omitempty"` + MachineHealthCheckTimeout *v1.Duration `json:"machineHealthCheckTimeout,omitempty"` + MaximumRequestsInflight *int `json:"maximumRequestsInflight,omitempty"` + MaximumMutatingRequestsInflight *int `json:"maximumMutatingRequestsInflight,omitempty"` } // EffectsApplyConfiguration constructs an declarative configuration of the Effects type for use with @@ -91,3 +93,19 @@ func (b *EffectsApplyConfiguration) WithMachineHealthCheckTimeout(value v1.Durat b.MachineHealthCheckTimeout = &value return b } + +// WithMaximumRequestsInflight sets the MaximumRequestsInflight field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaximumRequestsInflight field is set to the value of the last call. +func (b *EffectsApplyConfiguration) WithMaximumRequestsInflight(value int) *EffectsApplyConfiguration { + b.MaximumRequestsInflight = &value + return b +} + +// WithMaximumMutatingRequestsInflight sets the MaximumMutatingRequestsInflight field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaximumMutatingRequestsInflight field is set to the value of the last call. 
+func (b *EffectsApplyConfiguration) WithMaximumMutatingRequestsInflight(value int) *EffectsApplyConfiguration { + b.MaximumMutatingRequestsInflight = &value + return b +} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/clientset.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/clientset.go index 1b00f2cfd..8e910c921 100644 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/clientset.go +++ b/vendor/github.com/openshift/hypershift/client/clientset/clientset/clientset.go @@ -22,7 +22,6 @@ import ( "net/http" certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" schedulingv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/scheduling/v1alpha1" discovery "k8s.io/client-go/discovery" @@ -33,7 +32,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface - HypershiftV1alpha1() hypershiftv1alpha1.HypershiftV1alpha1Interface HypershiftV1beta1() hypershiftv1beta1.HypershiftV1beta1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface } @@ -42,7 +40,6 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client - hypershiftV1alpha1 *hypershiftv1alpha1.HypershiftV1alpha1Client hypershiftV1beta1 *hypershiftv1beta1.HypershiftV1beta1Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client } @@ -52,11 +49,6 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return c.certificatesV1alpha1 } -// HypershiftV1alpha1 retrieves the HypershiftV1alpha1Client -func (c *Clientset) HypershiftV1alpha1() hypershiftv1alpha1.HypershiftV1alpha1Interface { - return c.hypershiftV1alpha1 -} - // HypershiftV1beta1 retrieves the HypershiftV1beta1Client func (c *Clientset) HypershiftV1beta1() hypershiftv1beta1.HypershiftV1beta1Interface { return c.hypershiftV1beta1 @@ -115,10 +107,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.hypershiftV1alpha1, err = hypershiftv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.hypershiftV1beta1, err = hypershiftv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -149,7 +137,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) - cs.hypershiftV1alpha1 = hypershiftv1alpha1.New(c) cs.hypershiftV1beta1 = hypershiftv1beta1.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/clientset_generated.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/clientset_generated.go index c1d64d05f..8bacb0054 100644 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/clientset_generated.go +++ b/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/clientset_generated.go @@ -21,8 +21,6 @@ import ( clientset 
"github.com/openshift/hypershift/client/clientset/clientset" certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1" fakecertificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1/fake" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1" - fakehypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake" hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" fakehypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1/fake" schedulingv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/scheduling/v1alpha1" @@ -89,11 +87,6 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} } -// HypershiftV1alpha1 retrieves the HypershiftV1alpha1Client -func (c *Clientset) HypershiftV1alpha1() hypershiftv1alpha1.HypershiftV1alpha1Interface { - return &fakehypershiftv1alpha1.FakeHypershiftV1alpha1{Fake: &c.Fake} -} - // HypershiftV1beta1 retrieves the HypershiftV1beta1Client func (c *Clientset) HypershiftV1beta1() hypershiftv1beta1.HypershiftV1beta1Interface { return &fakehypershiftv1beta1.FakeHypershiftV1beta1{Fake: &c.Fake} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/register.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/register.go index 449fb06b8..e68b0346a 100644 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/register.go +++ b/vendor/github.com/openshift/hypershift/client/clientset/clientset/fake/register.go @@ -19,7 +19,6 @@ package fake import ( certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +33,6 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1alpha1.AddToScheme, - hypershiftv1alpha1.AddToScheme, hypershiftv1beta1.AddToScheme, schedulingv1alpha1.AddToScheme, } diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/scheme/register.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/scheme/register.go index 30f3b97e2..138bf229c 100644 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/scheme/register.go +++ b/vendor/github.com/openshift/hypershift/client/clientset/clientset/scheme/register.go @@ -19,7 +19,6 @@ package scheme import ( certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +33,6 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1alpha1.AddToScheme, - hypershiftv1alpha1.AddToScheme, hypershiftv1beta1.AddToScheme, 
schedulingv1alpha1.AddToScheme, } diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/doc.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/doc.go deleted file mode 100644 index 0e375e4fc..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/doc.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/doc.go deleted file mode 100644 index 422564f2d..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hostedcluster.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hostedcluster.go deleted file mode 100644 index 5a0eced12..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hostedcluster.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeHostedClusters implements HostedClusterInterface -type FakeHostedClusters struct { - Fake *FakeHypershiftV1alpha1 - ns string -} - -var hostedclustersResource = v1alpha1.SchemeGroupVersion.WithResource("hostedclusters") - -var hostedclustersKind = v1alpha1.SchemeGroupVersion.WithKind("HostedCluster") - -// Get takes name of the hostedCluster, and returns the corresponding hostedCluster object, and an error if there is any. -func (c *FakeHostedClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.HostedCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(hostedclustersResource, c.ns, name), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// List takes label and field selectors, and returns the list of HostedClusters that match those selectors. -func (c *FakeHostedClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.HostedClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(hostedclustersResource, hostedclustersKind, c.ns, opts), &v1alpha1.HostedClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.HostedClusterList{ListMeta: obj.(*v1alpha1.HostedClusterList).ListMeta} - for _, item := range obj.(*v1alpha1.HostedClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested hostedClusters. -func (c *FakeHostedClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(hostedclustersResource, c.ns, opts)) - -} - -// Create takes the representation of a hostedCluster and creates it. Returns the server's representation of the hostedCluster, and an error, if there is any. -func (c *FakeHostedClusters) Create(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.CreateOptions) (result *v1alpha1.HostedCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(hostedclustersResource, c.ns, hostedCluster), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// Update takes the representation of a hostedCluster and updates it. Returns the server's representation of the hostedCluster, and an error, if there is any. -func (c *FakeHostedClusters) Update(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (result *v1alpha1.HostedCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(hostedclustersResource, c.ns, hostedCluster), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHostedClusters) UpdateStatus(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (*v1alpha1.HostedCluster, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(hostedclustersResource, "status", c.ns, hostedCluster), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// Delete takes name of the hostedCluster and deletes it. Returns an error if one occurs. -func (c *FakeHostedClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(hostedclustersResource, c.ns, name, opts), &v1alpha1.HostedCluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHostedClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(hostedclustersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.HostedClusterList{}) - return err -} - -// Patch applies the patch and returns the patched hostedCluster. -func (c *FakeHostedClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.HostedCluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(hostedclustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied hostedCluster. -func (c *FakeHostedClusters) Apply(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) { - if hostedCluster == nil { - return nil, fmt.Errorf("hostedCluster provided to Apply must not be nil") - } - data, err := json.Marshal(hostedCluster) - if err != nil { - return nil, err - } - name := hostedCluster.Name - if name == nil { - return nil, fmt.Errorf("hostedCluster.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(hostedclustersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHostedClusters) ApplyStatus(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) { - if hostedCluster == nil { - return nil, fmt.Errorf("hostedCluster provided to Apply must not be nil") - } - data, err := json.Marshal(hostedCluster) - if err != nil { - return nil, err - } - name := hostedCluster.Name - if name == nil { - return nil, fmt.Errorf("hostedCluster.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(hostedclustersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.HostedCluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.HostedCluster), err -} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hypershift_client.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hypershift_client.go deleted file mode 100644 index ec67f13de..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_hypershift_client.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeHypershiftV1alpha1 struct { - *testing.Fake -} - -func (c *FakeHypershiftV1alpha1) HostedClusters(namespace string) v1alpha1.HostedClusterInterface { - return &FakeHostedClusters{c, namespace} -} - -func (c *FakeHypershiftV1alpha1) NodePools(namespace string) v1alpha1.NodePoolInterface { - return &FakeNodePools{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeHypershiftV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_nodepool.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_nodepool.go deleted file mode 100644 index 566677cd6..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake/fake_nodepool.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeNodePools implements NodePoolInterface -type FakeNodePools struct { - Fake *FakeHypershiftV1alpha1 - ns string -} - -var nodepoolsResource = v1alpha1.SchemeGroupVersion.WithResource("nodepools") - -var nodepoolsKind = v1alpha1.SchemeGroupVersion.WithKind("NodePool") - -// Get takes name of the nodePool, and returns the corresponding nodePool object, and an error if there is any. -func (c *FakeNodePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodePool, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(nodepoolsResource, c.ns, name), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// List takes label and field selectors, and returns the list of NodePools that match those selectors. -func (c *FakeNodePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodePoolList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(nodepoolsResource, nodepoolsKind, c.ns, opts), &v1alpha1.NodePoolList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.NodePoolList{ListMeta: obj.(*v1alpha1.NodePoolList).ListMeta} - for _, item := range obj.(*v1alpha1.NodePoolList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nodePools. -func (c *FakeNodePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(nodepoolsResource, c.ns, opts)) - -} - -// Create takes the representation of a nodePool and creates it. Returns the server's representation of the nodePool, and an error, if there is any. -func (c *FakeNodePools) Create(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.CreateOptions) (result *v1alpha1.NodePool, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(nodepoolsResource, c.ns, nodePool), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// Update takes the representation of a nodePool and updates it. Returns the server's representation of the nodePool, and an error, if there is any. -func (c *FakeNodePools) Update(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (result *v1alpha1.NodePool, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(nodepoolsResource, c.ns, nodePool), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodePools) UpdateStatus(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (*v1alpha1.NodePool, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(nodepoolsResource, "status", c.ns, nodePool), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// Delete takes name of the nodePool and deletes it. Returns an error if one occurs. -func (c *FakeNodePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(nodepoolsResource, c.ns, name, opts), &v1alpha1.NodePool{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNodePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(nodepoolsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.NodePoolList{}) - return err -} - -// Patch applies the patch and returns the patched nodePool. -func (c *FakeNodePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodePool, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(nodepoolsResource, c.ns, name, pt, data, subresources...), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied nodePool. -func (c *FakeNodePools) Apply(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) { - if nodePool == nil { - return nil, fmt.Errorf("nodePool provided to Apply must not be nil") - } - data, err := json.Marshal(nodePool) - if err != nil { - return nil, err - } - name := nodePool.Name - if name == nil { - return nil, fmt.Errorf("nodePool.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(nodepoolsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNodePools) ApplyStatus(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) { - if nodePool == nil { - return nil, fmt.Errorf("nodePool provided to Apply must not be nil") - } - data, err := json.Marshal(nodePool) - if err != nil { - return nil, err - } - name := nodePool.Name - if name == nil { - return nil, fmt.Errorf("nodePool.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(nodepoolsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.NodePool{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.NodePool), err -} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/generated_expansion.go deleted file mode 100644 index 02b4d9b16..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type HostedClusterExpansion interface{} - -type NodePoolExpansion interface{} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hostedcluster.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hostedcluster.go deleted file mode 100644 index eb3fd5bdf..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hostedcluster.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" - scheme "github.com/openshift/hypershift/client/clientset/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// HostedClustersGetter has a method to return a HostedClusterInterface. -// A group's client should implement this interface. -type HostedClustersGetter interface { - HostedClusters(namespace string) HostedClusterInterface -} - -// HostedClusterInterface has methods to work with HostedCluster resources. 
-type HostedClusterInterface interface { - Create(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.CreateOptions) (*v1alpha1.HostedCluster, error) - Update(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (*v1alpha1.HostedCluster, error) - UpdateStatus(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (*v1alpha1.HostedCluster, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.HostedCluster, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.HostedClusterList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.HostedCluster, err error) - Apply(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) - ApplyStatus(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) - HostedClusterExpansion -} - -// hostedClusters implements HostedClusterInterface -type hostedClusters struct { - client rest.Interface - ns string -} - -// newHostedClusters returns a HostedClusters -func newHostedClusters(c *HypershiftV1alpha1Client, namespace string) *hostedClusters { - return &hostedClusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the hostedCluster, and returns the corresponding hostedCluster object, and an error if there is any. -func (c *hostedClusters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.HostedCluster, err error) { - result = &v1alpha1.HostedCluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("hostedclusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HostedClusters that match those selectors. -func (c *hostedClusters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.HostedClusterList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.HostedClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("hostedclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested hostedClusters. -func (c *hostedClusters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("hostedclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a hostedCluster and creates it. Returns the server's representation of the hostedCluster, and an error, if there is any. 
-func (c *hostedClusters) Create(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.CreateOptions) (result *v1alpha1.HostedCluster, err error) { - result = &v1alpha1.HostedCluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("hostedclusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(hostedCluster). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a hostedCluster and updates it. Returns the server's representation of the hostedCluster, and an error, if there is any. -func (c *hostedClusters) Update(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (result *v1alpha1.HostedCluster, err error) { - result = &v1alpha1.HostedCluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("hostedclusters"). - Name(hostedCluster.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(hostedCluster). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *hostedClusters) UpdateStatus(ctx context.Context, hostedCluster *v1alpha1.HostedCluster, opts v1.UpdateOptions) (result *v1alpha1.HostedCluster, err error) { - result = &v1alpha1.HostedCluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("hostedclusters"). - Name(hostedCluster.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(hostedCluster). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the hostedCluster and deletes it. Returns an error if one occurs. -func (c *hostedClusters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("hostedclusters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *hostedClusters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("hostedclusters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched hostedCluster. -func (c *hostedClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.HostedCluster, err error) { - result = &v1alpha1.HostedCluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("hostedclusters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied hostedCluster. 
-func (c *hostedClusters) Apply(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) { - if hostedCluster == nil { - return nil, fmt.Errorf("hostedCluster provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(hostedCluster) - if err != nil { - return nil, err - } - name := hostedCluster.Name - if name == nil { - return nil, fmt.Errorf("hostedCluster.Name must be provided to Apply") - } - result = &v1alpha1.HostedCluster{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("hostedclusters"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *hostedClusters) ApplyStatus(ctx context.Context, hostedCluster *hypershiftv1alpha1.HostedClusterApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.HostedCluster, err error) { - if hostedCluster == nil { - return nil, fmt.Errorf("hostedCluster provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(hostedCluster) - if err != nil { - return nil, err - } - - name := hostedCluster.Name - if name == nil { - return nil, fmt.Errorf("hostedCluster.Name must be provided to Apply") - } - - result = &v1alpha1.HostedCluster{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("hostedclusters"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hypershift_client.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hypershift_client.go deleted file mode 100644 index 6b97d687a..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/hypershift_client.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - "github.com/openshift/hypershift/client/clientset/clientset/scheme" - rest "k8s.io/client-go/rest" -) - -type HypershiftV1alpha1Interface interface { - RESTClient() rest.Interface - HostedClustersGetter - NodePoolsGetter -} - -// HypershiftV1alpha1Client is used to interact with features provided by the hypershift.openshift.io group. 
-type HypershiftV1alpha1Client struct { - restClient rest.Interface -} - -func (c *HypershiftV1alpha1Client) HostedClusters(namespace string) HostedClusterInterface { - return newHostedClusters(c, namespace) -} - -func (c *HypershiftV1alpha1Client) NodePools(namespace string) NodePoolInterface { - return newNodePools(c, namespace) -} - -// NewForConfig creates a new HypershiftV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*HypershiftV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new HypershiftV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*HypershiftV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &HypershiftV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new HypershiftV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *HypershiftV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new HypershiftV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *HypershiftV1alpha1Client { - return &HypershiftV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *HypershiftV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/nodepool.go b/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/nodepool.go deleted file mode 100644 index 9ed18edc5..000000000 --- a/vendor/github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/nodepool.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" - scheme "github.com/openshift/hypershift/client/clientset/clientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// NodePoolsGetter has a method to return a NodePoolInterface. -// A group's client should implement this interface. -type NodePoolsGetter interface { - NodePools(namespace string) NodePoolInterface -} - -// NodePoolInterface has methods to work with NodePool resources. -type NodePoolInterface interface { - Create(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.CreateOptions) (*v1alpha1.NodePool, error) - Update(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (*v1alpha1.NodePool, error) - UpdateStatus(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (*v1alpha1.NodePool, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodePool, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodePoolList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodePool, err error) - Apply(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) - ApplyStatus(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) - NodePoolExpansion -} - -// nodePools implements NodePoolInterface -type nodePools struct { - client rest.Interface - ns string -} - -// newNodePools returns a NodePools -func newNodePools(c *HypershiftV1alpha1Client, namespace string) *nodePools { - return &nodePools{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the nodePool, and returns the corresponding nodePool object, and an error if there is any. -func (c *nodePools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodePool, err error) { - result = &v1alpha1.NodePool{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nodepools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NodePools that match those selectors. -func (c *nodePools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodePoolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.NodePoolList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("nodepools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodePools. 
-func (c *nodePools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("nodepools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a nodePool and creates it. Returns the server's representation of the nodePool, and an error, if there is any. -func (c *nodePools) Create(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.CreateOptions) (result *v1alpha1.NodePool, err error) { - result = &v1alpha1.NodePool{} - err = c.client.Post(). - Namespace(c.ns). - Resource("nodepools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nodePool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a nodePool and updates it. Returns the server's representation of the nodePool, and an error, if there is any. -func (c *nodePools) Update(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (result *v1alpha1.NodePool, err error) { - result = &v1alpha1.NodePool{} - err = c.client.Put(). - Namespace(c.ns). - Resource("nodepools"). - Name(nodePool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nodePool). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodePools) UpdateStatus(ctx context.Context, nodePool *v1alpha1.NodePool, opts v1.UpdateOptions) (result *v1alpha1.NodePool, err error) { - result = &v1alpha1.NodePool{} - err = c.client.Put(). - Namespace(c.ns). - Resource("nodepools"). - Name(nodePool.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(nodePool). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the nodePool and deletes it. Returns an error if one occurs. -func (c *nodePools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("nodepools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodePools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("nodepools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched nodePool. -func (c *nodePools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NodePool, err error) { - result = &v1alpha1.NodePool{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("nodepools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied nodePool. 
-func (c *nodePools) Apply(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) { - if nodePool == nil { - return nil, fmt.Errorf("nodePool provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(nodePool) - if err != nil { - return nil, err - } - name := nodePool.Name - if name == nil { - return nil, fmt.Errorf("nodePool.Name must be provided to Apply") - } - result = &v1alpha1.NodePool{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("nodepools"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodePools) ApplyStatus(ctx context.Context, nodePool *hypershiftv1alpha1.NodePoolApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.NodePool, err error) { - if nodePool == nil { - return nil, fmt.Errorf("nodePool provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(nodePool) - if err != nil { - return nil, err - } - - name := nodePool.Name - if name == nil { - return nil, fmt.Errorf("nodePool.Name must be provided to Apply") - } - - result = &v1alpha1.NodePool{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("nodepools"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/hypershift/client/informers/externalversions/generic.go b/vendor/github.com/openshift/hypershift/client/informers/externalversions/generic.go index 95a087da9..eae9d0ff7 100644 --- a/vendor/github.com/openshift/hypershift/client/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/hypershift/client/informers/externalversions/generic.go @@ -21,7 +21,6 @@ import ( "fmt" v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -60,12 +59,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().CertificateSigningRequestApprovals().Informer()}, nil - // Group=hypershift.openshift.io, Version=v1alpha1 - case hypershiftv1alpha1.SchemeGroupVersion.WithResource("hostedclusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1alpha1().HostedClusters().Informer()}, nil - case hypershiftv1alpha1.SchemeGroupVersion.WithResource("nodepools"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1alpha1().NodePools().Informer()}, nil - // Group=hypershift.openshift.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1beta1().CertificateSigningRequestApprovals().Informer()}, nil diff --git 
a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/interface.go b/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/interface.go index 59d4f1be3..49f26bc40 100644 --- a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/interface.go +++ b/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/interface.go @@ -18,15 +18,12 @@ limitations under the License. package hypershift import ( - v1alpha1 "github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1" v1beta1 "github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1beta1" internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -42,11 +39,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} - // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/hostedcluster.go b/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/hostedcluster.go deleted file mode 100644 index c4d4221df..000000000 --- a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/hostedcluster.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - clientset "github.com/openshift/hypershift/client/clientset/clientset" - internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/hypershift/client/listers/hypershift/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// HostedClusterInformer provides access to a shared informer and lister for -// HostedClusters. 
-type HostedClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.HostedClusterLister -} - -type hostedClusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewHostedClusterInformer constructs a new informer for HostedCluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewHostedClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredHostedClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredHostedClusterInformer constructs a new informer for HostedCluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredHostedClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.HypershiftV1alpha1().HostedClusters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.HypershiftV1alpha1().HostedClusters(namespace).Watch(context.TODO(), options) - }, - }, - &hypershiftv1alpha1.HostedCluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *hostedClusterInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredHostedClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *hostedClusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&hypershiftv1alpha1.HostedCluster{}, f.defaultInformer) -} - -func (f *hostedClusterInformer) Lister() v1alpha1.HostedClusterLister { - return v1alpha1.NewHostedClusterLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/interface.go b/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/interface.go deleted file mode 100644 index b0a593168..000000000 --- a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/interface.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // HostedClusters returns a HostedClusterInformer. - HostedClusters() HostedClusterInformer - // NodePools returns a NodePoolInformer. - NodePools() NodePoolInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// HostedClusters returns a HostedClusterInformer. -func (v *version) HostedClusters() HostedClusterInformer { - return &hostedClusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// NodePools returns a NodePoolInformer. -func (v *version) NodePools() NodePoolInformer { - return &nodePoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/nodepool.go b/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/nodepool.go deleted file mode 100644 index 8f9f3dc2a..000000000 --- a/vendor/github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1/nodepool.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - clientset "github.com/openshift/hypershift/client/clientset/clientset" - internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/openshift/hypershift/client/listers/hypershift/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// NodePoolInformer provides access to a shared informer and lister for -// NodePools. -type NodePoolInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.NodePoolLister -} - -type nodePoolInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewNodePoolInformer constructs a new informer for NodePool type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewNodePoolInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredNodePoolInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredNodePoolInformer constructs a new informer for NodePool type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredNodePoolInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.HypershiftV1alpha1().NodePools(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.HypershiftV1alpha1().NodePools(namespace).Watch(context.TODO(), options) - }, - }, - &hypershiftv1alpha1.NodePool{}, - resyncPeriod, - indexers, - ) -} - -func (f *nodePoolInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredNodePoolInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *nodePoolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&hypershiftv1alpha1.NodePool{}, f.defaultInformer) -} - -func (f *nodePoolInformer) Lister() v1alpha1.NodePoolLister { - return v1alpha1.NewNodePoolLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/expansion_generated.go deleted file mode 100644 index e6b74d647..000000000 --- a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// HostedClusterListerExpansion allows custom methods to be added to -// HostedClusterLister. -type HostedClusterListerExpansion interface{} - -// HostedClusterNamespaceListerExpansion allows custom methods to be added to -// HostedClusterNamespaceLister. -type HostedClusterNamespaceListerExpansion interface{} - -// NodePoolListerExpansion allows custom methods to be added to -// NodePoolLister. -type NodePoolListerExpansion interface{} - -// NodePoolNamespaceListerExpansion allows custom methods to be added to -// NodePoolNamespaceLister. 
-type NodePoolNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/hostedcluster.go b/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/hostedcluster.go deleted file mode 100644 index be00b4434..000000000 --- a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/hostedcluster.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// HostedClusterLister helps list HostedClusters. -// All objects returned here must be treated as read-only. -type HostedClusterLister interface { - // List lists all HostedClusters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.HostedCluster, err error) - // HostedClusters returns an object that can list and get HostedClusters. - HostedClusters(namespace string) HostedClusterNamespaceLister - HostedClusterListerExpansion -} - -// hostedClusterLister implements the HostedClusterLister interface. -type hostedClusterLister struct { - indexer cache.Indexer -} - -// NewHostedClusterLister returns a new HostedClusterLister. -func NewHostedClusterLister(indexer cache.Indexer) HostedClusterLister { - return &hostedClusterLister{indexer: indexer} -} - -// List lists all HostedClusters in the indexer. -func (s *hostedClusterLister) List(selector labels.Selector) (ret []*v1alpha1.HostedCluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.HostedCluster)) - }) - return ret, err -} - -// HostedClusters returns an object that can list and get HostedClusters. -func (s *hostedClusterLister) HostedClusters(namespace string) HostedClusterNamespaceLister { - return hostedClusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// HostedClusterNamespaceLister helps list and get HostedClusters. -// All objects returned here must be treated as read-only. -type HostedClusterNamespaceLister interface { - // List lists all HostedClusters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.HostedCluster, err error) - // Get retrieves the HostedCluster from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.HostedCluster, error) - HostedClusterNamespaceListerExpansion -} - -// hostedClusterNamespaceLister implements the HostedClusterNamespaceLister -// interface. -type hostedClusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HostedClusters in the indexer for a given namespace. 
-func (s hostedClusterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.HostedCluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.HostedCluster)) - }) - return ret, err -} - -// Get retrieves the HostedCluster from the indexer for a given namespace and name. -func (s hostedClusterNamespaceLister) Get(name string) (*v1alpha1.HostedCluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("hostedcluster"), name) - } - return obj.(*v1alpha1.HostedCluster), nil -} diff --git a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/nodepool.go b/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/nodepool.go deleted file mode 100644 index 5298d2fab..000000000 --- a/vendor/github.com/openshift/hypershift/client/listers/hypershift/v1alpha1/nodepool.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// NodePoolLister helps list NodePools. -// All objects returned here must be treated as read-only. -type NodePoolLister interface { - // List lists all NodePools in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NodePool, err error) - // NodePools returns an object that can list and get NodePools. - NodePools(namespace string) NodePoolNamespaceLister - NodePoolListerExpansion -} - -// nodePoolLister implements the NodePoolLister interface. -type nodePoolLister struct { - indexer cache.Indexer -} - -// NewNodePoolLister returns a new NodePoolLister. -func NewNodePoolLister(indexer cache.Indexer) NodePoolLister { - return &nodePoolLister{indexer: indexer} -} - -// List lists all NodePools in the indexer. -func (s *nodePoolLister) List(selector labels.Selector) (ret []*v1alpha1.NodePool, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NodePool)) - }) - return ret, err -} - -// NodePools returns an object that can list and get NodePools. -func (s *nodePoolLister) NodePools(namespace string) NodePoolNamespaceLister { - return nodePoolNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// NodePoolNamespaceLister helps list and get NodePools. -// All objects returned here must be treated as read-only. -type NodePoolNamespaceLister interface { - // List lists all NodePools in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.NodePool, err error) - // Get retrieves the NodePool from the indexer for a given namespace and name. 
- // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.NodePool, error) - NodePoolNamespaceListerExpansion -} - -// nodePoolNamespaceLister implements the NodePoolNamespaceLister -// interface. -type nodePoolNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NodePools in the indexer for a given namespace. -func (s nodePoolNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.NodePool, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.NodePool)) - }) - return ret, err -} - -// Get retrieves the NodePool from the indexer for a given namespace and name. -func (s nodePoolNamespaceLister) Get(name string) (*v1alpha1.NodePool, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("nodepool"), name) - } - return obj.(*v1alpha1.NodePool), nil -} diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30e..b9cc55abb 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 000000000..65d761bc9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 000000000..8547c8dfd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 000000000..2e45780b7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10..cc4ef1077 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. 
+ // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e..520cbd7d4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f..511746417 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb39..519db348a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. 
+ ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. 
+ pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) 
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d64..a4fa6eabd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e5723..9d9b81ab4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18e..62a4e7ad9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
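The histogram changes above introduce NativeHistogramMaxExemplars and NativeHistogramExemplarTTL. A minimal sketch of how these options could be wired up, assuming a hypothetical metric name and exemplar label (neither comes from this patch):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical latency histogram; name and label are illustration only.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "demo_request_duration_seconds",
		Help:                        "Request latency.",
		NativeHistogramBucketFactor: 1.1,             // enables the native (sparse) histogram
		NativeHistogramMaxExemplars: 5,               // keep at most 5 exemplars (leaving it at 0 means the default of 10)
		NativeHistogramExemplarTTL:  2 * time.Minute, // once the limit is hit, an exemplar older than 2m is evicted first (default 5m)
	})

	// ObserveWithExemplar also stores the exemplar for the native histogram,
	// subject to the limit and TTL configured above.
	h.(prometheus.ExemplarObserver).ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
}

Per the addExemplar logic above, exemplars are kept sorted by value and the replacement policy favors expired or tightly clustered exemplars, so the retained set stays spread out across the observed range.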
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136cee..14d56d2d0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b8..315eab5f1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbe..e598e66e6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. 
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a..c6fd2f58b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 146270444..1ab0e4796 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab..e1441598d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 000000000..fdc1e6239 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. 
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e1..de52cfee4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eaf..6f1200180 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -159,6 +158,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -184,9 +186,11 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. 
+// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -199,6 +203,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -207,6 +214,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -222,6 +232,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -277,73 +312,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. 
Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f..2c808eece 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa216..1448439b7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. 
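Among the testutil additions above is CollectAndFormat. A small sketch of how it might be used, with a hypothetical counter name that is not part of this patch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Hypothetical counter used only for illustration.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_events_total",
		Help: "Number of demo events.",
	})
	c.Inc()

	// Render just this collector's metrics in the plain text exposition format,
	// filtered to the given metric name.
	out, err := testutil.CollectAndFormat(c, expfmt.TypeTextPlain, "demo_events_total")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}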
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d..cf0c150c2 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd1..d942af8ed 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). 
- fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
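The expfmt changes above re-export the Fmt* constants as deprecated and add Format.WithEscapingScheme. A minimal sketch of building a content-type Format with the newer helpers instead of the deprecated constants (the underscore escaping choice is only an example):

package main

import (
	"os"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Preferred over the deprecated FmtOpenMetrics_1_0_0 constant.
	format := expfmt.NewFormat(expfmt.TypeOpenMetrics)

	// Append (or replace) the escaping term on the content type.
	format = format.WithEscapingScheme(model.UnderscoreEscaping)

	enc := expfmt.NewEncoder(os.Stdout, format)
	_ = enc // encode *dto.MetricFamily values with enc.Encode(...)
}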
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f..11c8ff4b9 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9..4b86434b3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211a..f085a923f 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). 
func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22f..73b7aa3e6 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
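The reworked IsValid documentation above, together with the new IsValidLegacy helper, can be exercised with a small hypothetical snippet (the dotted label name is made up): under UTF8Validation a dotted name passes IsValid but still fails the legacy check.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Switch the global scheme before any names are validated.
	model.NameValidationScheme = model.UTF8Validation

	name := model.LabelName("http.server.duration")

	// Valid UTF-8, so it passes under UTF8Validation...
	fmt.Println(name.IsValid()) // true

	// ...but it is not a legacy [a-zA-Z_][a-zA-Z0-9_]* name.
	fmt.Println(name.IsValidLegacy()) // false
}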
func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46..abb2c9001 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e..000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a5..f50966bc4 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -34,10 +34,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. 
If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, 
fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 6b5177fc3..21b995042 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.14" + Version = "3.5.15" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index 34f35b9f2..f0f3739aa 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -58,7 +58,7 @@ var DefaultZapLoggerConfig = zap.Config{ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { - enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700")) + enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700")) }, EncodeDuration: zapcore.StringDurationEncoder, diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index 150545d08..a7d37688d 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -180,12 +180,23 @@ type TLSInfo struct { parseFunc func([]byte, []byte) (tls.Certificate, error) // AllowedCN is a CN which must be provided by a client. + // + // Deprecated: use AllowedCNs instead. AllowedCN string // AllowedHostname is an IP address or hostname that must match the TLS // certificate provided by a client. + // + // Deprecated: use AllowedHostnames instead. AllowedHostname string + // AllowedCNs is a list of acceptable CNs which must be provided by a client. + AllowedCNs []string + + // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the + // TLS certificate provided by a client. + AllowedHostnames []string + // Logger logs TLS errors. // If nil, all logs are discarded. Logger *zap.Logger @@ -407,19 +418,52 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // Client certificates may be verified by either an exact match on the CN, // or a more general check of the CN and SANs. 
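For the new etcd client TLS fields, a hedged configuration sketch (certificate paths and common names are hypothetical) shows AllowedCNs taking over from the deprecated singular AllowedCN:

package main

import (
	"log"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	// Hypothetical certificate paths; AllowedCNs replaces the deprecated
	// AllowedCN field and accepts several acceptable common names.
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/server.crt",
		KeyFile:       "/etc/etcd/server.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
		AllowedCNs:    []string{"etcd-client", "etcd-peer"},
	}

	cfg, err := tlsInfo.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // plug into an HTTP or gRPC listener as needed
}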
var verifyCertificate func(*x509.Certificate) bool + + if info.AllowedCN != "" && len(info.AllowedCNs) > 0 { + return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs) + } + if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames) + } + if info.AllowedCN != "" && info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 { + return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames) + } + if info.AllowedCN != "" { - if info.AllowedHostname != "" { - return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) - } + info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead") verifyCertificate = func(cert *x509.Certificate) bool { return info.AllowedCN == cert.Subject.CommonName } } if info.AllowedHostname != "" { + info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead") verifyCertificate = func(cert *x509.Certificate) bool { return cert.VerifyHostname(info.AllowedHostname) == nil } } + if len(info.AllowedCNs) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedCN := range info.AllowedCNs { + if allowedCN == cert.Subject.CommonName { + return true + } + } + return false + } + } + if len(info.AllowedHostnames) > 0 { + verifyCertificate = func(cert *x509.Certificate) bool { + for _, allowedHostname := range info.AllowedHostnames { + if cert.VerifyHostname(allowedHostname) == nil { + return true + } + } + return false + } + } if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go index 41a6ec976..963f7e65c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/watch.go +++ b/vendor/go.etcd.io/etcd/client/v3/watch.go @@ -1036,7 +1036,7 @@ func (pr *progressRequest) toPB() *pb.WatchRequest { func streamKeyFromCtx(ctx context.Context) string { if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) + return fmt.Sprintf("%+v", map[string][]string(md)) } return "" } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664b..ae8577ef3 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b649..dbfb2a165 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint @@ -23,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -62,12 +65,12 @@ issues: - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -124,8 +127,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -300,3 +301,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e..8f68dbd04 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,158 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. 
(#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. 
(#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. 
+ This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. 
(#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +201,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +328,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -1836,7 +1989,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2410,7 +2563,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. 
(#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3003,7 +3156,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3243,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 202554933..945a07d2b 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58..22a2e9dbd 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -626,13 +629,14 @@ should be canceled. 
## Approvers and Maintainers -### Approvers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent -- [Chester Cheung](https://github.com/hanyuancheung), Tencent +### Approvers ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -641,16 +645,18 @@ should be canceled. ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76..b8292a4fb 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -145,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. 
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -264,7 +260,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a8909317..efec27890 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. 
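A minimal, hypothetical sketch of that manual-instrumentation path (the tracer and span names are arbitrary, and a real application would also register an SDK TracerProvider):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	// The global TracerProvider is a no-op until an SDK provider is
	// registered with otel.SetTracerProvider.
	tracer := otel.Tracer("example.com/hypothetical/instrumentation")

	ctx, span := tracer.Start(context.Background(), "do-work")
	defer span.End()

	doWork(ctx)
}

func doWork(context.Context) {}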
### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d..ffa9b6125 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. @@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdb..6cbefcead 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc..36f536703 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. 
+// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. 
func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. 
A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6..2acbac354 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c59501..921f85961 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6..ae92a4251 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index cfd1df9bf..a6acd8dca 100644 --- 
a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -76,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -92,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -110,12 +124,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -335,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) 
+} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b4..ac65262c6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b35..b2fe3e41d 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db778..f8435d8f2 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. 
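
A minimal sketch (Go, not part of the vendored diff) of the behavior introduced by the instID-keyed map in internal/global/meter.go: identical requests made before an SDK delegate is installed should now return the cached placeholder instrument rather than appending duplicates. It assumes the standard otel.Meter global entry point.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// Global meter with no SDK registered yet; instruments are delegated later.
	m := otel.Meter("example")

	// Same name, kind, description, and unit -> same instID, so the second call
	// should return the placeholder cached in m.instruments rather than a duplicate.
	c1, _ := m.Int64Counter("requests")
	c2, _ := m.Int64Counter("requests")

	fmt.Println(c1 == c2) // expected: true
}
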
// diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba5324..e079aaef1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e4023..a535782e1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e01..14e08c24a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. 
The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55ca..0a29a2f13 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -19,6 +19,14 @@ "matchManagers": ["gomod"], "matchDepTypes": ["indirect"], "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index ada857995..d5197e16c 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -115,7 +115,7 @@ func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { name = hostPart } if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) + port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. } return } diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174b..8c45a7107 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d10..cdbf41d6d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 000000000..ef85cb70c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. 
+// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. 
A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. 
In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. 
+func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab..d49adf671 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. 
If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. 
This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. 
The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 000000000..77952d2a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. 
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf243..dc5e34cad 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fc..000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) 
-done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 000000000..c9b7cdbbf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ab2896052..59e248161 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.28.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82a..c04b12f6b 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.32.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,21 +23,20 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.54.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.8.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.11 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index 333da285b..bd896bdc7 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego +//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go similarity index 95% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go index 4aec4874b..1a1679aaa 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s similarity index 89% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s rename to vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s index b3c1699bf..6899a1dab 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s @@ -2,15 +2,25 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (ppc64 || ppc64le) #include "textflag.h" // This was ported from the amd64 implementation. +#ifdef GOARCH_ppc64le +#define LE_MOVD MOVD +#define LE_MOVWZ MOVWZ +#define LE_MOVHZ MOVHZ +#else +#define LE_MOVD MOVDBR +#define LE_MOVWZ MOVWBR +#define LE_MOVHZ MOVHBR +#endif + #define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ + LE_MOVD (msg)( R0), t0; \ + LE_MOVD (msg)(R24), t1; \ MOVD $1, t2; \ ADDC t0, h0, h0; \ ADDE t1, h1, h1; \ @@ -50,10 +60,6 @@ ADDE t3, h1, h1; \ ADDZE h2 -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - // func update(state *[7]uint64, msg []byte) TEXT ·update(SB), $0-32 MOVD state+0(FP), R3 @@ -66,6 +72,8 @@ TEXT ·update(SB), $0-32 MOVD 24(R3), R11 // r0 MOVD 32(R3), R12 // r1 + MOVD $8, R24 + CMP R5, $16 BLT bytes_between_0_and_15 @@ -94,7 +102,7 @@ flush_buffer: // Greater than 8 -- load the rightmost remaining bytes in msg // and put into R17 (h1) - MOVD (R4)(R21), R17 + LE_MOVD (R4)(R21), R17 MOVD $16, R22 // Find the offset to those bytes @@ -118,7 +126,7 @@ just1: BLT less8 // Exactly 8 - MOVD (R4), R16 + LE_MOVD (R4), R16 CMP R17, $0 @@ -133,7 +141,7 @@ less8: MOVD $0, R22 // shift count CMP R5, $4 BLT less4 - MOVWZ (R4), R16 + LE_MOVWZ (R4), R16 ADD $4, R4 ADD $-4, R5 MOVD $32, R22 @@ -141,7 +149,7 @@ less8: less4: CMP R5, $2 BLT less2 - MOVHZ (R4), R21 + LE_MOVHZ (R4), R21 SLD R22, R21, R21 OR R16, R21, R16 ADD $16, R22 diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab17..885c4c593 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... 
} - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 000000000..54be8fd30 --- /dev/null +++ b/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. +// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 1350eef22..77741a195 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + 
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 000000000..e3784123c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 000000000..060fd6c64 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
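A minimal sketch of the Go 1.24+ path these helpers consume: net/http's Server carries an HTTP2 config block whose fields fillNetHTTPConfig copies into http2Config. The address, certificate paths, and limit values below are placeholders, not recommendations.

// Sketch only: assumes Go 1.24+, where net/http exposes the HTTP2 field read
// by fillNetHTTPConfig above. Only fields referenced in this patch are set;
// the address and certificate paths are placeholders.
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			MaxReadFrameSize:     1 << 20,
			PingTimeout:          15 * time.Second,
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over " + r.Proto))
		}),
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}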
+ +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f3..7688c356b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3ec..832414b45 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
+ + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -336,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -353,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -440,13 +429,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +447,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +485,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) 
sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +526,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -569,7 +563,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +603,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +623,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +634,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +656,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +920,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +934,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +965,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +990,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1022,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1046,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1062,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
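The keepalive machinery above (handlePingTimer plus the readIdleTimer plumbing) is driven by the Server fields added in this patch; a minimal sketch of turning it on, with illustrative timeout values and placeholder certificate paths:

// Sketch only: enables the server-side health-check ping described above.
// The timeout values are illustrative, not recommendations.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	h2 := &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a PING if no frame arrives for 30s
		PingTimeout:      15 * time.Second, // close the conn if the PING goes unanswered
		WriteByteTimeout: 10 * time.Second, // close the conn if writes make no progress
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert paths
}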
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1102,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1355,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1633,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2204,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -2855,6 +2899,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3301,7 +3350,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f97..f5968f440 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -203,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
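The EnableFullDuplex no-op added to the HTTP/2 responseWriter above is what lets net/http's ResponseController report success for HTTP/2 requests; a minimal sketch of a handler opting in (the echo behavior and certificate paths are illustrative):

// Sketch only: how a handler requests full-duplex behavior; on HTTP/2 the
// responseWriter above now reports success unconditionally.
package main

import (
	"io"
	"log"
	"net/http"
)

func echo(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	if err := rc.EnableFullDuplex(); err != nil {
		http.Error(w, "full duplex not supported", http.StatusNotImplemented)
		return
	}
	// Safe to interleave reads from the request body with writes to the response.
	if _, err := io.Copy(w, r.Body); err != nil {
		log.Println("echo:", err)
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)) // placeholder certs
}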
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -227,40 +240,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. // @@ -296,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -308,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
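Separately from the internal "unencrypted_http2" handoff added above, the exported way to issue unencrypted HTTP/2 (h2c) requests with this package remains the AllowHTTP field plus a dial function that skips TLS; a minimal sketch, with a placeholder URL:

// Sketch only: an h2c client using the long-standing AllowHTTP field; the
// "unencrypted_http2" path above is a separate, net/http-driven handoff.
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{
		AllowHTTP: true, // permit http:// URLs
		// Dial a plain TCP connection instead of a TLS one.
		DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, addr)
		},
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get("http://localhost:8080/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto)
}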
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -339,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -370,11 +388,22 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -432,12 +461,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -499,6 +528,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +538,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. 
- continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -554,6 +571,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -586,7 +605,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -597,7 +623,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -622,6 +648,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -640,9 +682,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -758,44 +801,37 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +843,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. 
cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +869,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +881,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +900,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -995,7 +1028,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1027,16 +1060,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. 
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1049,7 +1104,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1613,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1637,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1668,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. 
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -2199,7 +2284,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2215,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2278,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2302,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2345,7 +2447,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) @@ -2529,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. 
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. + limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -3081,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3103,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3263,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 000000000..b2de21161 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. 
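The "delivered to the user" path above refers to net/http/httptrace's Got1xxResponse hook; when it is installed, the caller takes responsibility for limiting how many informational responses it accepts. A short usage sketch (not part of the vendored diff; the URL is a placeholder):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			// Returning a non-nil error here aborts the request.
			fmt.Println("got informational response:", code)
			return nil
		},
	}
	req, _ := http.NewRequestWithContext(
		httptrace.WithClientTrace(context.Background(), trace),
		"GET", "https://example.com/", nil)
	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}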
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398a..6ff6bee7e 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780e..ac76165ce 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c20..48dbb9d84 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. 
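The UnencryptedNetConn handoff above is a plain interface assertion on the conn inside the *tls.Conn. A minimal sketch of a wrapper that would satisfy it; the type name is made up and this is illustrative only, not part of the vendored diff:

package main

import "net"

// unencryptedConn marks a plaintext connection that is temporarily
// wrapped in a *tls.Conn for the TLSNextProto handoff.
type unencryptedConn struct {
	net.Conn
}

// UnencryptedNetConn returns the underlying plaintext connection,
// matching the interface asserted by unencryptedNetConnFromTLSConn.
func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }

func main() {
	var nc net.Conn = unencryptedConn{} // zero Conn; placeholder only
	if u, ok := nc.(interface{ UnencryptedNetConn() net.Conn }); ok {
		_ = u.UnencryptedNetConn()
	}
}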
+ +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb33217..109997d77 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 000000000..ec2acfe54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 000000000..b838cb9e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. 
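Returning to the oauth2 change above: the new Token.ExpiresIn field carries the raw wire value, and callers are expected to derive Expiry themselves when they need an absolute deadline. A minimal conversion sketch, assuming the vendored oauth2 version that includes ExpiresIn (not part of the vendored diff):

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
	if tok.ExpiresIn > 0 && tok.Expiry.IsZero() {
		// "now" only approximates the token's time base; subtract some
		// slack if expiring early is preferable to expiring late.
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	fmt.Println("token valid until", tok.Expiry)
}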
+func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb1..32a44514e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33ef..ce208ce6d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9f..170d21ddf 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 08f35ea17..f1caf0f78 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -110,7 +110,6 @@ func doinit() { ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) ARM64.HasDIT = isSet(hwCap, hwcap_DIT) - // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 000000000..a0fd7e2f7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c5..600a68078 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 000000000..4d0888b0c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. 
+ +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e1..6e08a76a7 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab..7ca4fa12a 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. 
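A hedged usage sketch for the new ethtool timestamping helper added above (Linux-only; "eth0" is a placeholder interface name and the ioctl may require elevated privileges). This is illustrative and not part of the vendored diff:

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Any socket fd works as the ioctl target for SIOCETHTOOL.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	info, err := unix.IoctlGetEthtoolTsInfo(fd, "eth0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("PHC index: %d, tx types: %#x\n", info.Phc_index, info.Tx_types)
}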
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. 
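A companion sketch for the PTP helpers above: derive a clock ID from a /dev/ptp* descriptor with FdToClockID and read its capabilities and time (Linux-only; the device path is a placeholder and typically needs root). Illustrative only, not part of the vendored diff:

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("max freq adj: %d ppb, pins: %d\n", caps.Max_adj, caps.N_pins)

	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
}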
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index e14b766a3..6ab02b6c3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . #if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -656,7 +668,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -666,7 +678,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2..6f15ba1ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 3f1d3d4cb..230a94549 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. 
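The congestion-control accessors above follow the pattern in their doc comments: read TCP_CONGESTION first, then decode with the matching helper. A hedged sketch, assuming an established TCP socket fd and that unix.TCP_CC_INFO is the appropriate option level/name for these calls (Linux-only; illustrative, not part of the vendored diff):

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// printCCInfo reports algorithm-specific state for an established TCP socket.
func printCCInfo(fd int) {
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		log.Fatal(err)
	}
	switch algo {
	case "bbr":
		if info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO); err == nil {
			fmt.Printf("bbr min_rtt=%dus\n", info.Min_rtt)
		}
	case "vegas":
		if info, err := unix.GetsockoptTCPCCVegasInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO); err == nil {
			fmt.Printf("vegas rtt=%dus\n", info.Rtt)
		}
	default:
		fmt.Println("congestion control:", algo)
	}
}

func main() {
	// Obtain a real fd from an established *net.TCPConn (for example via
	// (*net.TCPConn).SyscallConn) before calling printCCInfo.
}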
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75..745e5c7e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451..dd2262a40 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, 
initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a28894..8cf3670bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1..7bf5c04bb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if 
err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. + if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 000000000..07ac8e09d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 000000000..297e97bce --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
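For completeness, the rewritten Getrandom above keeps its public signature, so existing callers behave the same whether or not the vDSO fast path is taken. A minimal usage sketch (Linux-only; illustrative, not part of the vendored diff):

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 32)
	// Falls back to the getrandom(2) syscall when the vDSO helper is
	// unavailable (for example on builds without the go1.24 vgetrandom path).
	n, err := unix.Getrandom(buf, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d random bytes\n", n)
}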
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b246..ccba391c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2620,6 +2625,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2933,11 +2960,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3238,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3255,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3654,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168d..0c00cb3f3 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b592..dfb364554 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e8..d46dcf78a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 
0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8e..3af3248a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -235,6 +240,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e7..292bcf028 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -233,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea780..782b7110f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 
= 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1e..84973fd92 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b4..6d9cbc3b2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6f..5f9fedbce 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 
0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cefb..bb0026ee0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a3..46120db5c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a5..5c951634f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -232,6 +237,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61b..11a84d5af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c210..f78c4617c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +234,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + 
PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aace..aeb777c34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -234,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5adb..5cc1e8eb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f681..f485dbf45 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 
SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c232..1893e2fe8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf51..16a4017da 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a55..a5459e766 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 9f2550dc3..8daaf3faf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1724,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - 
NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1768,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1797,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1829,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1897,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1949,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -3766,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3987,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4082,6 +4118,106 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + 
PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Rsv [3]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4609,7 +4745,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5349,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af46..2e5d5a443 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba..4e613cf63 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. 
func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a314..4510bfc3f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c..51311e205 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2203,6 +2203,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. 
+const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. 
See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc0..6f5252880 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), 
uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md index d03d0aefe..05ff623f9 100644 --- a/vendor/golang.org/x/term/README.md +++ b/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go index 49c9fc978..547802b0f 100644 --- a/vendor/golang.org/x/text/internal/catmsg/codec.go +++ b/vendor/golang.org/x/text/internal/catmsg/codec.go @@ -257,7 +257,7 @@ func (d *Decoder) setError(err error) { // Language returns the language in which the message is being rendered. // // The destination language may be a child language of the language used for -// encoding. For instance, a decoding language of "pt-PT"" is consistent with an +// encoding. For instance, a decoding language of "pt-PT" is consistent with an // encoding language of "pt". func (d *Decoder) Language() language.Tag { return d.tag } diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493..93a798ab6 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. 
func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b..8f9e592f8 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df22..0e72d8537 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c820..e9fe10394 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. + if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c8..024ffebd3 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. 
In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360add36f53d48bb0863b701696e0d7b2d..2c0693d7abbf532f021dafc96e7568f57214b8e7 100644 GIT binary patch literal 99 zcmd;*m3YRk#C*w)K}(o}QGiK;Nr72|(SYfa9SaAe1S6NM#B;bblK@aefe9$h2$E(1 dOTS=O5(H{Ql40Ut&|548XQ literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d..bf1aba0e8 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,9 +5,14 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. + MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b..f32529856 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,28 +78,42 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. 
+ StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool @@ -258,6 +273,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +367,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +442,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +483,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b0..d2f549497 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8d..67a51b327 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ 
b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b..7611796e8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) } @@ -68,7 +72,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd0121..d9b9d916a 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b4..09792d96f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,42 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff..bef5a25fb 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85..9404270de 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20..0d5b546e0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. 
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44..7c1f66c8c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb73..78be9df34 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, 
opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23..077712c2c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a..f72ddd882 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd21224..6254f5de4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 000000000..9f6c32a7d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. 
+ for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. + emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. 
+func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee6..b6849d669 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d45..741b5ed29 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a2..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8..79e186667 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f3338..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd09..832a7988f 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba..1ffddf687 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f686..62a52a40a 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 + Minor = 35 Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03..c36d4a9cd 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f2928..78445d116 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. 
+// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f5..69a050509 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. + if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 856175542..ebcb4a8ab 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda..d0aeab958 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} @@ -43,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return 
descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -127,6 +129,9 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { parentFS.GenerateLegacyUnmarshalJSON = *luje } + if sep := goFeatures.StripEnumPrefix; sep != nil { + parentFS.StripEnumPrefix = int(*sep) + } } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6eb..742cb518c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead..0015fcb35 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990..479527b58 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d8..246156561 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb075..6dea75cd5 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c5..5067b89e9 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -18,22 +18,83 @@ import ( sync "sync" ) +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for 
GoFeatures_StripEnumPrefix. +var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} + +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} + type GoFeatures struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
- LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` } func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +105,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,6 +127,13 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -90,7 +158,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, @@ -103,14 +171,31 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 
0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, + 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, + 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, + 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, + 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, + 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, + 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, + 0x0a, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, + 0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, + 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, + 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, + 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, + 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -125,19 +210,22 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_StripEnumPrefix)(0), // 0: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 1: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 2: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list 
for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 2, // 1: pb.go:extendee -> google.protobuf.FeatureSet + 1, // 2: pb.go:type_name -> pb.GoFeatures + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -145,32 +233,19 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d3..87da199a3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd91..b99d4d241 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { 
func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb82..1761bc9c6 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -55,11 +55,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6..19de8d371 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc..8f206a661 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,19 +295,20 @@ type Value struct { // NewValue constructs a Value from a general-purpose Go interface. 
// -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. @@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if 
protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b..0d20722d7 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826a..006060e56 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,11 +123,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := 
&file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,11 +447,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +514,7 @@ 
func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index a326e7335..6da894919 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -1170,7 +1170,7 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uid, - Groups: []string{user.SystemPrivilegedGroup}, + Groups: []string{user.AllAuthenticated, user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go 
b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go index d56e7df51..7be62fad0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/tracing.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/tracing.go @@ -23,7 +23,9 @@ import ( "net" "github.com/spf13/pflag" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/semconv/v1.12.0" "google.golang.org/grpc" @@ -48,6 +50,12 @@ var ( codecs = serializer.NewCodecFactory(cfgScheme) ) +func init() { + // Prevent memory leak from OTel metrics, which we don't use: + // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190 + otel.SetMeterProvider(noop.NewMeterProvider()) +} + func init() { install.Install(cfgScheme) } diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go index d73c8e62c..f53fdb832 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/resource_encoding_config.go @@ -130,13 +130,24 @@ func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example gvks, _, err := scheme.ObjectKinds(example) if err != nil { return schema.GroupVersion{}, err - } else if len(gvks) == 0 { - // Probably shouldn't happen if err is non-nil + } + + var gvk schema.GroupVersionKind + for _, item := range gvks { + if item.Group != binaryVersionOfResource.Group { + continue + } + + gvk = item + break + } + + if len(gvk.Kind) == 0 { return schema.GroupVersion{}, fmt.Errorf("object %T has no GVKs registered in scheme", example) } // VersionsForGroupKind returns versions in priority order - versions := scheme.VersionsForGroupKind(schema.GroupKind{Group: gvks[0].Group, Kind: gvks[0].Kind}) + versions := scheme.VersionsForGroupKind(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) compatibilityVersion := effectiveVersion.MinCompatibilityVersion() @@ -148,7 +159,7 @@ func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example gvk := schema.GroupVersionKind{ Group: gv.Group, Version: gv.Version, - Kind: gvks[0].Kind, + Kind: gvk.Kind, } exampleOfGVK, err := scheme.New(gvk) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index ad01c5a5d..f4ccc62f6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -245,7 +245,7 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource, ex var err error if backwardCompatibleInterface, ok := s.ResourceEncodingConfig.(CompatibilityResourceEncodingConfig); ok { - codecConfig.StorageVersion, err = backwardCompatibleInterface.BackwardCompatibileStorageEncodingFor(groupResource, example) + codecConfig.StorageVersion, err = backwardCompatibleInterface.BackwardCompatibileStorageEncodingFor(chosenStorageResource, example) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5789e67ab..1b758ab25 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -22,7 +22,7 @@ import ( "strings" openapi_v2 "github.com/google/gnostic-models/openapiv2" - "gopkg.in/yaml.v2" + yaml 
"sigs.k8s.io/yaml/goyaml.v2" ) func newSchemaError(path *Path, format string, a ...interface{}) error { diff --git a/vendor/k8s.io/utils/lru/lru.go b/vendor/k8s.io/utils/lru/lru.go index 47f135281..40c22ece1 100644 --- a/vendor/k8s.io/utils/lru/lru.go +++ b/vendor/k8s.io/utils/lru/lru.go @@ -16,6 +16,7 @@ limitations under the License. package lru import ( + "fmt" "sync" groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru" @@ -44,6 +45,17 @@ func NewWithEvictionFunc(size int, f EvictionFunc) *Cache { return c } +// SetEvictionFunc updates the eviction func +func (c *Cache) SetEvictionFunc(f EvictionFunc) error { + c.lock.Lock() + defer c.lock.Unlock() + if c.cache.OnEvicted != nil { + return fmt.Errorf("lru cache eviction function is already set") + } + c.cache.OnEvicted = f + return nil +} + // Add adds a value to the cache. func (c *Cache) Add(key Key, value interface{}) { c.lock.Lock() diff --git a/vendor/modules.txt b/vendor/modules.txt index 66145ce52..0aa972e2e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,7 +7,7 @@ github.com/antlr4-go/antlr/v4 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.52.6 +# github.com/aws/aws-sdk-go v1.55.5 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer @@ -89,9 +89,10 @@ github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 @@ -130,10 +131,6 @@ github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp # github.com/google/cel-go v0.20.1 ## explicit; go 1.18 github.com/google/cel-go/cel @@ -158,7 +155,7 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic-models v0.6.8 +# github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 ## explicit; go 1.18 github.com/google/gnostic-models/compiler github.com/google/gnostic-models/extensions @@ -176,13 +173,13 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/gophercloud/gophercloud/v2 v2.1.1 +# github.com/gophercloud/gophercloud/v2 v2.2.0 ## explicit; go 1.22 github.com/gophercloud/gophercloud/v2 github.com/gophercloud/gophercloud/v2/openstack @@ -203,6 +200,8 @@ github.com/gophercloud/utils/v2/gnocchi github.com/gophercloud/utils/v2/internal github.com/gophercloud/utils/v2/openstack/clientconfig github.com/gophercloud/utils/v2/openstack/compute/v2/availabilityzones +# github.com/gorilla/websocket v1.5.3 +## explicit; go 1.12 # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit 
github.com/grpc-ecosystem/go-grpc-prometheus @@ -226,6 +225,15 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/klauspost/compress v1.17.11 +## explicit; go 1.21 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kubernetes-csi/external-snapshotter/client/v6 v6.3.0 ## explicit; go 1.20 github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumegroupsnapshot/v1alpha1 @@ -234,6 +242,9 @@ github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/scheme github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumegroupsnapshot/v1alpha1 github.com/kubernetes-csi/external-snapshotter/client/v6/clientset/versioned/typed/volumesnapshot/v1 +# github.com/kylelemons/godebug v1.1.0 +## explicit; go 1.11 +github.com/kylelemons/godebug/diff # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -251,19 +262,22 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/gomega v1.33.1 -## explicit; go 1.20 +# github.com/onsi/ginkgo/v2 v2.21.0 +## explicit; go 1.22.0 +# github.com/onsi/gomega v1.36.0 +## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers +github.com/onsi/gomega/matchers/internal/miter github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openshift/api v0.0.0-20241121204516-053bb8a33f6d +# github.com/openshift/api v0.0.0-20241126141851-807d6dfccb05 ## explicit; go 1.22.0 github.com/openshift/api github.com/openshift/api/annotations @@ -382,10 +396,9 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1 github.com/openshift/client-go/operator/listers/operator/v1 github.com/openshift/client-go/operator/listers/operator/v1alpha1 -# github.com/openshift/hypershift v0.1.39 +# github.com/openshift/hypershift v0.1.51 ## explicit; go 1.22.0 github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1 -github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1 github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1 github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1 github.com/openshift/hypershift/client/clientset/clientset @@ -393,8 +406,6 @@ github.com/openshift/hypershift/client/clientset/clientset/fake github.com/openshift/hypershift/client/clientset/clientset/scheme github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1 github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1/fake -github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1 
-github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1 github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1/fake github.com/openshift/hypershift/client/clientset/clientset/typed/scheduling/v1alpha1 @@ -403,21 +414,17 @@ github.com/openshift/hypershift/client/informers/externalversions github.com/openshift/hypershift/client/informers/externalversions/certificates github.com/openshift/hypershift/client/informers/externalversions/certificates/v1alpha1 github.com/openshift/hypershift/client/informers/externalversions/hypershift -github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1alpha1 github.com/openshift/hypershift/client/informers/externalversions/hypershift/v1beta1 github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces github.com/openshift/hypershift/client/informers/externalversions/scheduling github.com/openshift/hypershift/client/informers/externalversions/scheduling/v1alpha1 github.com/openshift/hypershift/client/listers/certificates/v1alpha1 -github.com/openshift/hypershift/client/listers/hypershift/v1alpha1 github.com/openshift/hypershift/client/listers/hypershift/v1beta1 github.com/openshift/hypershift/client/listers/scheduling/v1alpha1 -# github.com/openshift/hypershift/api v0.0.0-20240725153211-8b880bdd20d1 +# github.com/openshift/hypershift/api v0.0.0-20241126143550-da24e17fcccb ## explicit; go 1.22.0 github.com/openshift/hypershift/api/certificates github.com/openshift/hypershift/api/certificates/v1alpha1 -github.com/openshift/hypershift/api/hypershift -github.com/openshift/hypershift/api/hypershift/v1alpha1 github.com/openshift/hypershift/api/hypershift/v1beta1 github.com/openshift/hypershift/api/ibmcapi github.com/openshift/hypershift/api/scheduling @@ -471,8 +478,10 @@ github.com/pkg/errors # github.com/pkg/profile v1.7.0 ## explicit; go 1.13 github.com/pkg/profile -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal @@ -483,8 +492,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.1 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -510,7 +519,7 @@ github.com/stoewer/go-strcase # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# go.etcd.io/etcd/api/v3 v3.5.14 +# go.etcd.io/etcd/api/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -518,7 +527,7 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.14 +# go.etcd.io/etcd/client/pkg/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil @@ -526,7 +535,7 @@ 
go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.14 +# go.etcd.io/etcd/client/v3 v3.5.15 ## explicit; go 1.21 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials @@ -541,8 +550,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -569,8 +578,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/metric v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop @@ -582,8 +591,8 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/trace v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop @@ -608,7 +617,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.27.0 +# golang.org/x/crypto v0.29.0 ## explicit; go 1.20 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 @@ -617,11 +626,11 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/salsa20/salsa -# golang.org/x/exp v0.0.0-20240707233637-46b078467d37 -## explicit; go 1.20 +# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f +## explicit; go 1.22.0 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.29.0 +# golang.org/x/net v0.31.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -634,24 +643,24 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.9.0 ## explicit; go 1.18 golang.org/x/sync/singleflight -# golang.org/x/sys v0.25.0 +# golang.org/x/sys v0.27.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.24.0 +# golang.org/x/term v0.26.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.18.0 +# golang.org/x/text v0.20.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -682,7 +691,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm 
golang.org/x/text/width -# golang.org/x/time v0.5.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate # google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 @@ -753,8 +762,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.2 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -815,7 +824,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.31.1 +# k8s.io/api v0.31.3 ## explicit; go 1.22.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -875,7 +884,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.31.1 +# k8s.io/apiextensions-apiserver v0.31.3 ## explicit; go 1.22.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -898,7 +907,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1 -# k8s.io/apimachinery v0.31.1 +# k8s.io/apimachinery v0.31.3 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -962,7 +971,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.31.1 +# k8s.io/apiserver v0.31.3 ## explicit; go 1.22.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1113,7 +1122,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/client-go v0.31.1 +# k8s.io/client-go v0.31.3 ## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1443,7 +1452,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.31.1 +# k8s.io/component-base v0.31.3 ## explicit; go 1.22.0 k8s.io/component-base/cli k8s.io/component-base/cli/flag @@ -1477,20 +1486,20 @@ k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/internal/verbosity k8s.io/klog/v2/textlogger -# k8s.io/kms v0.31.1 +# k8s.io/kms v0.31.3 ## explicit; go 1.22.0 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.31.1 +# k8s.io/kube-aggregator v0.31.3 ## explicit; go 1.22.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f +# k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f ## explicit; go 1.20 k8s.io/kube-openapi/pkg/builder 
k8s.io/kube-openapi/pkg/builder3 @@ -1511,7 +1520,7 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 +# k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1562,7 +1571,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go index 6d182768d..77ae25116 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go @@ -17,6 +17,8 @@ limitations under the License. package fieldpath import ( + "fmt" + "sigs.k8s.io/structured-merge-diff/v4/value" "sort" "strings" @@ -136,6 +138,198 @@ func (s *Set) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.TypeRef) } } +// MakePrefixMatcherOrDie is the same as PrefixMatcher except it panics if parts can't be +// turned into a SetMatcher. +func MakePrefixMatcherOrDie(parts ...interface{}) *SetMatcher { + result, err := PrefixMatcher(parts...) + if err != nil { + panic(err) + } + return result +} + +// PrefixMatcher creates a SetMatcher that matches all field paths prefixed by the given list of matcher path parts. +// The matcher parts may any of: +// +// - PathElementMatcher - for wildcards, `MatchAnyPathElement()` can be used as well. +// - PathElement - for any path element +// - value.FieldList - for listMap keys +// - value.Value - for scalar list elements +// - string - For field names +// - int - for array indices +func PrefixMatcher(parts ...interface{}) (*SetMatcher, error) { + current := MatchAnySet() // match all field path suffixes + for i := len(parts) - 1; i >= 0; i-- { + part := parts[i] + var pattern PathElementMatcher + switch t := part.(type) { + case PathElementMatcher: + // any path matcher, including wildcard + pattern = t + case PathElement: + // any path element + pattern = PathElementMatcher{PathElement: t} + case *value.FieldList: + // a listMap key + if len(*t) == 0 { + return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)") + } + pattern = PathElementMatcher{PathElement: PathElement{Key: t}} + case value.Value: + // a scalar or set-type list element + pattern = PathElementMatcher{PathElement: PathElement{Value: &t}} + case string: + // a plain field name + pattern = PathElementMatcher{PathElement: PathElement{FieldName: &t}} + case int: + // a plain list index + pattern = PathElementMatcher{PathElement: PathElement{Index: &t}} + default: + return nil, fmt.Errorf("unexpected type %T", t) + } + current = &SetMatcher{ + members: []*SetMemberMatcher{{ + Path: pattern, + Child: current, + }}, + } + } + return current, nil +} + +// MatchAnyPathElement returns a PathElementMatcher that matches any path element. +func MatchAnyPathElement() PathElementMatcher { + return PathElementMatcher{Wildcard: true} +} + +// MatchAnySet returns a SetMatcher that matches any set. 
+func MatchAnySet() *SetMatcher { + return &SetMatcher{wildcard: true} +} + +// NewSetMatcher returns a new SetMatcher. +// Wildcard members take precedent over non-wildcard members; +// all non-wildcard members are ignored if there is a wildcard members. +func NewSetMatcher(wildcard bool, members ...*SetMemberMatcher) *SetMatcher { + sort.Sort(sortedMemberMatcher(members)) + return &SetMatcher{wildcard: wildcard, members: members} +} + +// SetMatcher defines a matcher that matches fields in a Set. +// SetMatcher is structured much like a Set but with wildcard support. +type SetMatcher struct { + // wildcard indicates that all members and children are included in the match. + // If set, the members field is ignored. + wildcard bool + // members provides patterns to match the members of a Set. + // Wildcard members are sorted before non-wildcards and take precedent over + // non-wildcard members. + members sortedMemberMatcher +} + +type sortedMemberMatcher []*SetMemberMatcher + +func (s sortedMemberMatcher) Len() int { return len(s) } +func (s sortedMemberMatcher) Less(i, j int) bool { return s[i].Path.Less(s[j].Path) } +func (s sortedMemberMatcher) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortedMemberMatcher) Find(p PathElementMatcher) (location int, ok bool) { + return sort.Find(len(s), func(i int) int { + return s[i].Path.Compare(p) + }) +} + +// Merge merges s and s2 and returns a SetMatcher that matches all field paths matched by either s or s2. +// During the merge, members of s and s2 with the same PathElementMatcher merged into a single member +// with the children of each merged by calling this function recursively. +func (s *SetMatcher) Merge(s2 *SetMatcher) *SetMatcher { + if s.wildcard || s2.wildcard { + return NewSetMatcher(true) + } + merged := make(sortedMemberMatcher, len(s.members), len(s.members)+len(s2.members)) + copy(merged, s.members) + for _, m := range s2.members { + if i, ok := s.members.Find(m.Path); ok { + // since merged is a shallow copy, do not modify elements in place + merged[i] = &SetMemberMatcher{ + Path: merged[i].Path, + Child: merged[i].Child.Merge(m.Child), + } + } else { + merged = append(merged, m) + } + } + return NewSetMatcher(false, merged...) // sort happens here +} + +// SetMemberMatcher defines a matcher that matches the members of a Set. +// SetMemberMatcher is structured much like the elements of a SetNodeMap, but +// with wildcard support. +type SetMemberMatcher struct { + // Path provides a matcher to match members of a Set. + // If Path is a wildcard, all members of a Set are included in the match. + // Otherwise, if any Path is Equal to a member of a Set, that member is + // included in the match and the children of that member are matched + // against the Child matcher. + Path PathElementMatcher + + // Child provides a matcher to use for the children of matched members of a Set. + Child *SetMatcher +} + +// PathElementMatcher defined a path matcher for a PathElement. +type PathElementMatcher struct { + // Wildcard indicates that all PathElements are matched by this matcher. + // If set, PathElement is ignored. + Wildcard bool + + // PathElement indicates that a PathElement is matched if it is Equal + // to this PathElement. 
+ PathElement +} + +func (p PathElementMatcher) Equals(p2 PathElementMatcher) bool { + return p.Wildcard != p2.Wildcard && p.PathElement.Equals(p2.PathElement) +} + +func (p PathElementMatcher) Less(p2 PathElementMatcher) bool { + if p.Wildcard && !p2.Wildcard { + return true + } else if p2.Wildcard { + return false + } + return p.PathElement.Less(p2.PathElement) +} + +func (p PathElementMatcher) Compare(p2 PathElementMatcher) int { + if p.Wildcard && !p2.Wildcard { + return -1 + } else if p2.Wildcard { + return 1 + } + return p.PathElement.Compare(p2.PathElement) +} + +// FilterIncludeMatches returns a Set with only the field paths that match. +func (s *Set) FilterIncludeMatches(pattern *SetMatcher) *Set { + if pattern.wildcard { + return s + } + + members := PathElementSet{} + for _, m := range s.Members.members { + for _, pm := range pattern.members { + if pm.Path.Wildcard || pm.Path.PathElement.Equals(m) { + members.Insert(m) + break + } + } + } + return &Set{ + Members: members, + Children: *s.Children.FilterIncludeMatches(pattern), + } +} + // Size returns the number of members of the set. func (s *Set) Size() int { return s.Members.Size() + s.Children.Size() @@ -476,6 +670,33 @@ func (s *SetNodeMap) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.Ty } } +// FilterIncludeMatches returns a SetNodeMap with only the field paths that match the matcher. +func (s *SetNodeMap) FilterIncludeMatches(pattern *SetMatcher) *SetNodeMap { + if pattern.wildcard { + return s + } + + var out sortedSetNode + for _, member := range s.members { + for _, c := range pattern.members { + if c.Path.Wildcard || c.Path.PathElement.Equals(member.pathElement) { + childSet := member.set.FilterIncludeMatches(c.Child) + if childSet.Size() > 0 { + out = append(out, setNode{ + pathElement: member.pathElement, + set: childSet, + }) + } + break + } + } + } + + return &SetNodeMap{ + members: out, + } +} + // Iterate calls f for each PathElement in the set. func (s *SetNodeMap) Iterate(f func(PathElement)) { for _, n := range s.members { @@ -503,3 +724,59 @@ func (s *SetNodeMap) Leaves() *SetNodeMap { } return out } + +// Filter defines an interface for excluding field paths from a set. +// NewExcludeSetFilter can be used to create a filter that removes +// specific field paths and all of their children. +// NewIncludeMatcherFilter can be used to create a filter that removes all fields except +// the fields that match a field path matcher. PrefixMatcher and MakePrefixMatcherOrDie +// can be used to define field path patterns. +type Filter interface { + // Filter returns a filtered copy of the set. + Filter(*Set) *Set +} + +// NewExcludeSetFilter returns a filter that removes field paths in the exclude set. +func NewExcludeSetFilter(exclude *Set) Filter { + return excludeFilter{exclude} +} + +// NewExcludeFilterSetMap converts a map of APIVersion to exclude set to a map of APIVersion to exclude filters. +func NewExcludeFilterSetMap(resetFields map[APIVersion]*Set) map[APIVersion]Filter { + result := make(map[APIVersion]Filter) + for k, v := range resetFields { + result[k] = excludeFilter{v} + } + return result +} + +type excludeFilter struct { + excludeSet *Set +} + +func (t excludeFilter) Filter(set *Set) *Set { + return set.RecursiveDifference(t.excludeSet) +} + +// NewIncludeMatcherFilter returns a filter that only includes field paths that match. +// If no matchers are provided, the filter includes all field paths. +// PrefixMatcher and MakePrefixMatcherOrDie can help create basic matcher. 
+func NewIncludeMatcherFilter(matchers ...*SetMatcher) Filter { + if len(matchers) == 0 { + return includeMatcherFilter{&SetMatcher{wildcard: true}} + } + matcher := matchers[0] + for i := 1; i < len(matchers); i++ { + matcher = matcher.Merge(matchers[i]) + } + + return includeMatcherFilter{matcher} +} + +type includeMatcherFilter struct { + matcher *SetMatcher +} + +func (pf includeMatcherFilter) Filter(set *Set) *Set { + return set.FilterIncludeMatches(pf.matcher) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index d5a977d60..455818ff8 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -15,7 +15,6 @@ package merge import ( "fmt" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" "sigs.k8s.io/structured-merge-diff/v4/value" @@ -31,7 +30,10 @@ type Converter interface { // UpdateBuilder allows you to create a new Updater by exposing all of // the options and setting them once. type UpdaterBuilder struct { - Converter Converter + Converter Converter + IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter + + // IgnoredFields provides a set of fields to ignore for each IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set // Stop comparing the new object with old object after applying. @@ -46,6 +48,7 @@ type UpdaterBuilder struct { func (u *UpdaterBuilder) BuildUpdater() *Updater { return &Updater{ Converter: u.Converter, + IgnoreFilter: u.IgnoreFilter, IgnoredFields: u.IgnoredFields, returnInputOnNoop: u.ReturnInputOnNoop, } @@ -60,6 +63,9 @@ type Updater struct { // Deprecated: This will eventually become private. IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + // Deprecated: This will eventually become private. 
+ IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter + returnInputOnNoop bool } @@ -71,8 +77,19 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } - versions := map[fieldpath.APIVersion]*typed.Comparison{ - version: compare.ExcludeFields(s.IgnoredFields[version]), + var versions map[fieldpath.APIVersion]*typed.Comparison + + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + if s.IgnoredFields != nil { + versions = map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + } else { + versions = map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.FilterFields(s.IgnoreFilter[version]), + } } for manager, managerSet := range managers { @@ -102,7 +119,12 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa if err != nil { return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } - versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + + if s.IgnoredFields != nil { + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } else { + versions[managerSet.APIVersion()] = compare.FilterFields(s.IgnoreFilter[managerSet.APIVersion()]) + } } conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) @@ -154,13 +176,23 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if _, ok := managers[manager]; !ok { managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) } + set := managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added) - ignored := s.IgnoredFields[version] - if ignored == nil { - ignored = fieldpath.NewSet() + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + var ignoreFilter fieldpath.Filter + if s.IgnoredFields != nil { + ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version]) + } else { + ignoreFilter = s.IgnoreFilter[version] + } + if ignoreFilter != nil { + set = ignoreFilter.Filter(set) } + managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), + set, version, false, ) @@ -189,13 +221,17 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) } - ignored := s.IgnoredFields[version] - if ignored != nil { - set = set.RecursiveDifference(ignored) - // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
- if lastSet != nil { - lastSet.Set().RecursiveDifference(ignored) - } + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + var ignoreFilter fieldpath.Filter + if s.IgnoredFields != nil { + ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version]) + } else { + ignoreFilter = s.IgnoreFilter[version] + } + if ignoreFilter != nil { + set = ignoreFilter.Filter(set) } managers[manager] = fieldpath.NewVersionedSet(set, version, true) newObject, err = s.prune(newObject, managers, manager, lastSet) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go index ed483cbbc..5fffa5e2c 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -72,6 +72,16 @@ func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { return c } +func (c *Comparison) FilterFields(filter fieldpath.Filter) *Comparison { + if filter == nil { + return c + } + c.Removed = filter.Filter(c.Removed) + c.Modified = filter.Filter(c.Modified) + c.Added = filter.Filter(c.Added) + return c +} + type compareWalker struct { lhs value.Value rhs value.Value diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 4258ee5ba..0e9f7cc7e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -19,9 +19,9 @@ package typed import ( "fmt" - yaml "gopkg.in/yaml.v2" "sigs.k8s.io/structured-merge-diff/v4/schema" "sigs.k8s.io/structured-merge-diff/v4/value" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) // YAMLObject is an object encoded in YAML. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index f0d58d42c..88693b87e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -19,7 +19,9 @@ package value import ( "bytes" "encoding/json" + "errors" "fmt" + "io" "reflect" "sort" "sync" @@ -184,6 +186,11 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 // and is intended to replace it. + // Check if the object is a nil pointer. + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil, nil + } // Check if the object has a custom string converter and use it if available, since it is much more efficient // than round tripping through json. if converter, ok := e.getUnstructuredConverter(sv); ok { @@ -191,11 +198,6 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er } // Check if the object has a custom JSON marshaller/unmarshaller. if marshaler, ok := e.getJsonMarshaler(sv); ok { - if sv.Kind() == reflect.Ptr && sv.IsNil() { - // We're done - we don't need to store anything. 
- return nil, nil - } - data, err := marshaler.MarshalJSON() if err != nil { return nil, err @@ -379,34 +381,47 @@ const maxDepth = 10000 // unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func unmarshal(data []byte, v interface{}) error { + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + next := decoder.InputOffset() + if _, err := decoder.Token(); !errors.Is(err, io.EOF) { + tail := bytes.TrimLeft(data[next:], " \t\r\n") + return fmt.Errorf("unexpected trailing data at offset %d", len(data)-len(tail)) + } + + // If the decode succeeds, post-process the object to convert json.Number objects to int64 or float64 switch v := v.(type) { case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertMapNumbers(*v, 0) case *[]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertSliceNumbers(*v, 0) + case *interface{}: + return convertInterfaceNumbers(v, 0) + default: - return json.Unmarshal(data, v) + return nil + } +} + +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) } + return err } // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. 
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go index ea79e3a00..f72e5cd25 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go @@ -23,7 +23,7 @@ import ( "strings" jsoniter "github.com/json-iterator/go" - "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) var ( From 9c0402a29f77fd560536e68f95ae9e19012f2a42 Mon Sep 17 00:00:00 2001 From: Mulham Raee Date: Mon, 6 Jan 2025 14:47:14 +0100 Subject: [PATCH 2/2] Support HCP labels --- pkg/driver/common/operator/hooks.go | 45 ++++++++++++++- pkg/driver/common/operator/hooks_test.go | 57 +++++++++++++++++++ .../operator/test_manifests/hcp_labels.yaml | 9 +++ .../test_manifests/hcp_no_labels.yaml | 6 ++ 4 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 pkg/driver/common/operator/test_manifests/hcp_labels.yaml create mode 100644 pkg/driver/common/operator/test_manifests/hcp_no_labels.yaml diff --git a/pkg/driver/common/operator/hooks.go b/pkg/driver/common/operator/hooks.go index cf95cd7fa..7b43091cc 100644 --- a/pkg/driver/common/operator/hooks.go +++ b/pkg/driver/common/operator/hooks.go @@ -39,7 +39,7 @@ func NewDefaultOperatorControllerConfig(flavour generator.ClusterFlavour, c *cli cfg.AddDeploymentHookBuilders(c, withClusterWideProxy, withStandaloneReplicas) } else { // HyperShift - cfg.AddDeploymentHookBuilders(c, withHyperShiftReplicas, withHyperShiftNodeSelector, withHyperShiftControlPlaneImages, withHyperShiftCustomTolerations) + cfg.AddDeploymentHookBuilders(c, withHyperShiftReplicas, withHyperShiftNodeSelector, withHyperShiftLabels, withHyperShiftControlPlaneImages, withHyperShiftCustomTolerations) } return cfg @@ -141,6 +141,49 @@ func getHostedControlPlaneTolerations(hostedControlPlaneLister hypev1beta1lister return tolerations, nil } +// withHyperShiftLabels sets Deployment labels on a HyperShift hosted control-plane. +func withHyperShiftLabels(c *clients.Clients) (dc.DeploymentHookFunc, []factory.Informer) { + hook := func(_ *opv1.OperatorSpec, deployment *appsv1.Deployment) error { + labels, err := getHostedControlLabels( + c.ControlPlaneHypeInformer.Hypershift().V1beta1().HostedControlPlanes().Lister(), + c.ControlPlaneNamespace) + if err != nil { + return err + } + + if deployment.Spec.Template.Labels == nil { + deployment.Spec.Template.Labels = map[string]string{} + } + + for key, value := range labels { + // don't replace existing labels as they are used in the deployment's labelSelector. + if _, exist := deployment.Spec.Template.Labels[key]; !exist { + deployment.Spec.Template.Labels[key] = value + } + } + return nil + } + informers := []factory.Informer{ + c.ControlPlaneHypeInformer.Hypershift().V1beta1().HostedControlPlanes().Informer(), + } + return hook, informers +} + +// getHostedControlLabels returns the labels from the HostedControlPlane CR. +func getHostedControlLabels(hostedControlPlaneLister hypev1beta1listers.HostedControlPlaneLister, namespace string) (map[string]string, error) { + hcp, err := getHostedControlPlane(hostedControlPlaneLister, namespace) + if err != nil { + return nil, err + } + labels := hcp.Spec.Labels + if len(labels) == 0 { + return nil, nil + } + klog.V(4).Infof("Using labels %v", labels) + return labels, nil + +} + // getHostedControlPlane returns the HostedControlPlane CR. 
func getHostedControlPlane(hostedControlPlaneLister hypev1beta1listers.HostedControlPlaneLister, namespace string) (*hypev1beta1api.HostedControlPlane, error) { list, err := hostedControlPlaneLister.List(labels.Everything()) diff --git a/pkg/driver/common/operator/hooks_test.go b/pkg/driver/common/operator/hooks_test.go index 19dd253e9..d0104efec 100644 --- a/pkg/driver/common/operator/hooks_test.go +++ b/pkg/driver/common/operator/hooks_test.go @@ -146,6 +146,63 @@ func Test_WithHyperShiftNodeSelector(t *testing.T) { } } +func Test_WithHyperShiftLabels(t *testing.T) { + tests := []struct { + name string + hcp *hypev1beta1api.HostedControlPlane + expectedLabels map[string]string + }{ + { + name: "no labels", + hcp: getTestHostedControlPlane("hcp_no_labels.yaml"), + expectedLabels: nil, + }, + { + name: "labels", + hcp: getTestHostedControlPlane("hcp_labels.yaml"), + expectedLabels: map[string]string{ + "foo": "bar", + "baz": "", + }, + }, + { + name: "existing labels should not be replaced", + hcp: getTestHostedControlPlane("hcp_no_labels.yaml"), + expectedLabels: map[string]string{ + "app": "aws-ebs-csi-driver-controller", + "hypershift.openshift.io/hosted-control-plane": "clusters-test", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cr := clients.GetFakeOperatorCR() + c := clients.NewFakeClients("clusters-test", cr) + // Arrange: inject HostedControlPlane to the clients + c.ControlPlaneHypeClient.(*fakehype.Clientset).Tracker().Add(tt.hcp) + + hook, _ := withHyperShiftLabels(c) + deployment := getTestDeployment() + // Sync the informers with the client as the last step, withHyperShiftLabels() + // must create necessary informers before. + clients.SyncFakeInformers(t, c) + + // Act + err := hook(&cr.Spec.OperatorSpec, deployment) + if err != nil { + t.Fatalf("unexpected hook error: %v", err) + } + // Assert + for key, expectedValue := range tt.expectedLabels { + value, exist := deployment.Spec.Template.Labels[key] + if !exist || value != expectedValue { + t.Errorf("expected labels %s to exist with value %s", key, expectedValue) + } + } + }) + } +} + func Test_WithHyperShiftControlPlaneImages(t *testing.T) { tests := []struct { name string diff --git a/pkg/driver/common/operator/test_manifests/hcp_labels.yaml b/pkg/driver/common/operator/test_manifests/hcp_labels.yaml new file mode 100644 index 000000000..7cefcd760 --- /dev/null +++ b/pkg/driver/common/operator/test_manifests/hcp_labels.yaml @@ -0,0 +1,9 @@ +apiVersion: hypershift.openshift.io/v1beta1 +kind: HostedControlPlane +metadata: + name: test + namespace: clusters-test +spec: + labels: + foo: bar + baz: "" diff --git a/pkg/driver/common/operator/test_manifests/hcp_no_labels.yaml b/pkg/driver/common/operator/test_manifests/hcp_no_labels.yaml new file mode 100644 index 000000000..2ff99e69f --- /dev/null +++ b/pkg/driver/common/operator/test_manifests/hcp_no_labels.yaml @@ -0,0 +1,6 @@ +apiVersion: hypershift.openshift.io/v1beta1 +kind: HostedControlPlane +metadata: + name: test + namespace: clusters-test +spec: {}
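
--
A minimal standalone sketch of the merge rule that the withHyperShiftLabels hook in PATCH 2/2 applies: labels from the HostedControlPlane's spec.labels are copied onto the Deployment's pod template, but keys the Deployment already defines are kept as-is because they back the Deployment's label selector. This is illustrative only and not part of the patch; mergeHCPLabels and the sample values below are hypothetical names chosen for the example.

package main

import "fmt"

// mergeHCPLabels copies hcpLabels into existing without overwriting keys
// that are already present, mirroring the behavior of withHyperShiftLabels.
func mergeHCPLabels(existing, hcpLabels map[string]string) map[string]string {
	if existing == nil {
		existing = map[string]string{}
	}
	for k, v := range hcpLabels {
		if _, ok := existing[k]; !ok {
			existing[k] = v
		}
	}
	return existing
}

func main() {
	// Labels already set on the pod template (part of the label selector).
	podTemplateLabels := map[string]string{
		"app": "aws-ebs-csi-driver-controller",
	}
	// Labels coming from HostedControlPlane spec.labels (see hcp_labels.yaml).
	hcpLabels := map[string]string{
		"app": "should-not-win", // existing key is preserved, not replaced
		"foo": "bar",
		"baz": "",
	}
	// Prints the merged set: "app" keeps its original value, "foo" and "baz" are added.
	fmt.Println(mergeHCPLabels(podTemplateLabels, hcpLabels))
}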