diff --git a/go.mod b/go.mod index 66936173a773..b229cae716f6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/openshift/origin -go 1.24.0 +go 1.24.6 require ( cloud.google.com/go/storage v1.56.0 @@ -66,6 +66,7 @@ require ( github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 github.com/openshift/library-go v0.0.0-20251015151611-6fc7a74b67c5 + github.com/operator-framework/api v0.36.0 github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 github.com/pborman/uuid v1.2.0 github.com/pkg/errors v0.9.1 @@ -76,11 +77,11 @@ require ( github.com/prometheus/common v0.65.0 github.com/rs/zerolog v1.34.0 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.9 github.com/spf13/viper v1.8.1 github.com/stretchr/objx v0.5.2 - github.com/stretchr/testify v1.11.0 + github.com/stretchr/testify v1.11.1 github.com/tecbiz-ch/nutanix-go-sdk v0.1.15 github.com/tidwall/gjson v1.18.0 github.com/tidwall/pretty v1.2.0 @@ -90,7 +91,7 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.6.4 go.etcd.io/etcd/client/v3 v3.6.4 golang.org/x/crypto v0.42.0 - golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.27.0 golang.org/x/net v0.43.0 golang.org/x/oauth2 v0.30.0 @@ -119,7 +120,7 @@ require ( k8s.io/pod-security-admission v0.34.1 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 sigs.k8s.io/cloud-provider-azure v1.30.4 - sigs.k8s.io/controller-runtime v0.22.1 + sigs.k8s.io/controller-runtime v0.22.3 sigs.k8s.io/gateway-api v1.4.0 sigs.k8s.io/kustomize/kyaml v0.20.1 sigs.k8s.io/structured-merge-diff/v6 v6.3.0 @@ -164,13 +165,14 @@ require ( github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/campoy/embedmd v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.6.0 // indirect @@ -235,7 +237,7 @@ require ( github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cadvisor v0.52.1 // indirect - github.com/google/cel-go v0.26.0 // indirect + github.com/google/cel-go v0.26.1 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect @@ -248,7 +250,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/parth 
v0.0.0-20190131123155-b4df798d6542 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -334,7 +336,7 @@ require ( github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/src-d/gcfg v1.4.0 // indirect github.com/std-uritemplate/std-uritemplate/go/v2 v2.0.3 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/stoewer/go-strcase v1.3.1 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect @@ -356,13 +358,13 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/mock v0.4.0 // indirect @@ -401,7 +403,7 @@ require ( k8s.io/kubelet v0.31.1 // indirect k8s.io/mount-utils v0.0.0 // indirect k8s.io/sample-apiserver v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect diff --git a/go.sum b/go.sum index f952a06e27ae..28f4921e9c2b 100644 --- a/go.sum +++ b/go.sum @@ -201,8 +201,8 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1L github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= @@ -228,6 +228,8 @@ github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/cenkalti/backoff/v4 v4.3.0 
h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -493,8 +495,8 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cadvisor v0.52.1 h1:sC8SZ6jio9ds+P2dk51bgbeYeufxo55n0X3tmrpA9as= github.com/google/cadvisor v0.52.1/go.mod h1:OAhPcx1nOm5YwMh/JhpUOMKyv1YKLRtS9KgzWPndHmA= -github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= -github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= +github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -575,8 +577,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= @@ -890,6 +892,8 @@ github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 h1:PMTg github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/operator-framework/api v0.36.0 h1:6+duRhamCvB540JbvNp/1+Pot7luff7HqdAOm9bAntg= +github.com/operator-framework/api v0.36.0/go.mod h1:QSmHMx8XpGsNWvjU5CUelVZC916VLp/TZhfYvGKpghM= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 h1:7k+dokKFfpICbkpX5TvvpFbKTFsl/6YQd46EpY2JNhc= 
github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417/go.mod h1:9LxDV3rAHlGHAYtVrT62y/fqfIxc5RrDiYi9RVeD0gg= @@ -973,14 +977,13 @@ github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -989,8 +992,8 @@ github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/std-uritemplate/std-uritemplate/go/v2 v2.0.3 h1:7hth9376EoQEd1hH4lAp3vnaLP2UMyxuMMghLKzDHyU= github.com/std-uritemplate/std-uritemplate/go/v2 v2.0.3/go.mod h1:Z5KcoM0YLC7INlNhEezeIZ0TZNYf7WSNO0Lvah4DSeQ= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= +github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -1007,8 +1010,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.2.0 
h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tecbiz-ch/nutanix-go-sdk v0.1.15 h1:ZT5I6OFGswvMceujUE10ZXPNnT5UQIW9gAX4FEFK6Ds= @@ -1092,10 +1095,10 @@ go.opentelemetry.io/contrib/propagators/b3 v1.19.0 h1:ulz44cpm6V5oAeg5Aw9HyqGFMS go.opentelemetry.io/contrib/propagators/b3 v1.19.0/go.mod h1:OzCmE2IVS+asTI+odXQstRGVfXQ4bXv9nMBRK0nNyqQ= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= @@ -1108,8 +1111,8 @@ go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFh go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -1158,8 +1161,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= +golang.org/x/exp 
v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo= @@ -1603,16 +1606,16 @@ rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/cloud-provider-azure v1.30.4 h1:KD9wH7JAEnYNLQnk6tT78RHGc2iIx1F5BnNBtpm3uAI= sigs.k8s.io/cloud-provider-azure v1.30.4/go.mod h1:MAzEM7J+Kg949oUwNdzft7N6SCj81DtEJclMxCXwv3U= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 h1:qiifAaaBqV3d/EcN9dKJaJI8S9FD/JhBOwrTPp+MBJY= sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29/go.mod h1:ZFAt0qF1kR+w8nBVJK56s6CFvLrlosN1i2c+Sxb7LBk= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 h1:Fm/Yjv4nXjUtJ90uXKSKwPwaTWYuDFMhDNNOd77PlOg= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16/go.mod h1:+kl90flu4+WCP6HBGVYbKVQR+5ztDzUNrWJz8rsnvRU= -sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= -sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= +sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/gateway-api v1.4.0 h1:ZwlNM6zOHq0h3WUX2gfByPs2yAEsy/EenYJB78jpQfQ= sigs.k8s.io/gateway-api v1.4.0/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= diff --git a/test/extended/storage/storage_networkpolicy.go b/test/extended/storage/storage_networkpolicy.go index c695024d2ce7..b84324023b29 100644 --- a/test/extended/storage/storage_networkpolicy.go +++ b/test/extended/storage/storage_networkpolicy.go @@ -5,15 +5,39 @@ import ( "fmt" "strings" + "github.com/blang/semver/v4" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" exutil "github.com/openshift/origin/test/extended/util" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2e "k8s.io/kubernetes/test/e2e/framework" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// ResourceType defines the type of Kubernetes workload resource +// storage_networkpolicy.go contains tests for verifying network policy configurations +// for storage-related operators in OpenShift. 
+// +// This test suite validates that storage operators and their workload resources have +// the correct network policy labels to ensure proper network segmentation and security. +// +// Test Coverage: +// 1. CSO (Cluster Storage Operator) resources - verifies network policy labels on +// cluster-storage-operator, csi-snapshot-controller, and related deployments +// 2. CSI driver resources - verifies network policy labels on platform-specific CSI +// drivers (AWS EBS/EFS, Azure Disk/File, GCP PD/Filestore, vSphere, IBM Cloud, +// OpenStack Cinder/Manila, SMB) +// 3. LSO (Local Storage Operator) resources - verifies network policy labels on +// the operator deployment and diskmaker daemonsets +// 4. NetworkPolicy resources - ensures required NetworkPolicies exist with correct +// PodSelector labels for CSO, CSI, and LSO namespaces +// +// LSO-specific Design Considerations: +// - The LSO namespace is not a hardcoded constant: it is detected at runtime because +// LSO can be installed in a user-specified namespace + +// ResourceType defines the type of OpenShift workload resource +type ResourceType string const ( @@ -21,6 +45,13 @@ ResourceTypeDaemonSet ResourceType = "DaemonSet" ) +// lsoInfo contains information about the Local Storage Operator installation +type lsoInfo struct { + Installed bool + Namespace string + Version string +} + // resourceCheck defines a check for a workload resource (Deployment, DaemonSet, etc.) type resourceCheck struct { ResourceType ResourceType @@ -37,6 +68,11 @@ var ( npLabelOperatorMetricsRange = map[string]string{"openshift.storage.network-policy.operator-metrics-range": "allow"} npLabelMetricsRange = map[string]string{"openshift.storage.network-policy.metrics-range": "allow"} npLabelAllEgress = map[string]string{"openshift.storage.network-policy.all-egress": "allow"} + // LSO specific network policy labels + npLabelLSOAPIServer = map[string]string{"openshift.storage.network-policy.lso.api-server": "allow"} + npLabelLSODNS = map[string]string{"openshift.storage.network-policy.lso.dns": "allow"} + npLabelLSOOperatorMetrics = map[string]string{"openshift.storage.network-policy.lso.operator-metrics": "allow"} + npLabelLSODiskmakerMetrics = map[string]string{"openshift.storage.network-policy.lso.diskmaker-metrics": "allow"} ) func mergeLabels(maps ...map[string]string) map[string]string { @@ -58,6 +94,9 @@ var ( csiOperatorWithAllEgressRequiredLabels = mergeLabels(npLabelAPI, npLabelDNS, npLabelOperatorMetricsRange, npLabelAllEgress) csiControllerRequiredLabels = mergeLabels(npLabelAPI, npLabelDNS, npLabelMetricsRange) csiControllerWithAllEgressRequiredLabels = mergeLabels(npLabelAPI, npLabelDNS, npLabelMetricsRange, npLabelAllEgress) + // LSO specific required labels + lsoOperatorRequiredLabels = mergeLabels(npLabelLSOAPIServer, npLabelLSODNS, npLabelLSOOperatorMetrics) + lsoDiskmakerRequiredLabels = mergeLabels(npLabelLSOAPIServer, npLabelLSODNS, npLabelLSODiskmakerMetrics) ) type npCheck struct { @@ -101,11 +140,26 @@ var networkPolicyChecks = []npCheck{ // }, } +// getLSONetworkPolicyCheck returns the LSO network policy check configuration +// based on the detected LSO installation information +func getLSONetworkPolicyCheck(lso *lsoInfo) npCheck { + return npCheck{ + Namespace: lso.Namespace, + RequiredPodSelectors: []map[string]string{ + npLabelLSOAPIServer, + npLabelLSODNS, + npLabelLSOOperatorMetrics, + npLabelLSODiskmakerMetrics, + }, + } +} + var _ = g.Describe("[sig-storage][OCPFeature:StorageNetworkPolicy] 
Storage Network Policy", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLI("storage-network-policy") currentPlatform = e2e.TestContext.Provider + lsoInstallInfo *lsoInfo // LSO installation information detected once per suite ) g.BeforeEach(func() { @@ -114,6 +168,20 @@ var _ = g.Describe("[sig-storage][OCPFeature:StorageNetworkPolicy] Storage Netwo if isMicroShift { g.Skip("Storage Network Policy tests are not supported on MicroShift") } + + // Detect LSO installation only once (cache the result) + if lsoInstallInfo == nil { + lsoInstallInfo, err = getLSOInfo(oc) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to detect LSO installation") + + if lsoInstallInfo.Installed { + supported := isLSOVersionSupported(lsoInstallInfo.Version) + g.By(fmt.Sprintf("Detected LSO installed in namespace: %s, version: %s (network policy support: %v)", + lsoInstallInfo.Namespace, lsoInstallInfo.Version, supported)) + } else { + g.By("LSO is not installed on this cluster") + } + } }) g.It("should verify required labels for CSO related Operators", func() { @@ -296,6 +364,59 @@ var _ = g.Describe("[sig-storage][OCPFeature:StorageNetworkPolicy] Storage Netwo runResourceChecks(oc, CSIResourcesToCheck, currentPlatform) }) + g.It("should verify required labels for LSO related resources", func() { + // Skip if LSO is not installed or version is lower than 4.21.0 + if !lsoInstallInfo.Installed { + g.Skip("LSO is not installed on this cluster") + } + + if !isLSOVersionSupported(lsoInstallInfo.Version) { + g.Skip(fmt.Sprintf("LSO network policy support requires version >= 4.21.0, current version: %s", lsoInstallInfo.Version)) + } + + LSOResourcesToCheck := []resourceCheck{ + { + ResourceType: ResourceTypeDeployment, + Namespace: lsoInstallInfo.Namespace, + Name: "local-storage-operator", + Platform: "all", + RequiredLabels: lsoOperatorRequiredLabels, + }, + { + ResourceType: ResourceTypeDaemonSet, + Namespace: lsoInstallInfo.Namespace, + Name: "diskmaker-manager", + Platform: "all", + RequiredLabels: lsoDiskmakerRequiredLabels, + }, + { + ResourceType: ResourceTypeDaemonSet, + Namespace: lsoInstallInfo.Namespace, + Name: "diskmaker-discovery", + Platform: "all", + RequiredLabels: lsoDiskmakerRequiredLabels, + }, + } + + runResourceChecks(oc, LSOResourcesToCheck, currentPlatform) + }) + + g.It("should ensure required NetworkPolicies exist with correct labels for LSO", func() { + // Skip if LSO is not installed or version is lower than 4.21.0 + if !lsoInstallInfo.Installed { + g.Skip("LSO is not installed on this cluster") + } + + if !isLSOVersionSupported(lsoInstallInfo.Version) { + g.Skip(fmt.Sprintf("LSO network policy support requires version >= 4.21.0, current version: %s", lsoInstallInfo.Version)) + } + + // Get LSO network policy check configuration + lsoCheck := getLSONetworkPolicyCheck(lsoInstallInfo) + + verifyNetworkPolicyPodSelectors(oc, lsoCheck.Namespace, lsoCheck.RequiredPodSelectors) + }) + g.It("should ensure required NetworkPolicies exist with correct labels", func() { for _, c := range networkPolicyChecks { _, err := oc.AdminKubeClient().CoreV1().Namespaces().Get(context.TODO(), c.Namespace, metav1.GetOptions{}) @@ -310,36 +431,7 @@ var _ = g.Describe("[sig-storage][OCPFeature:StorageNetworkPolicy] Storage Netwo g.Fail(fmt.Sprintf("Error fetching namespace %s: %v", c.Namespace, err)) } - // List all NetworkPolicies in the namespace - npList, err := oc.AdminKubeClient().NetworkingV1().NetworkPolicies(c.Namespace).List(context.TODO(), metav1.ListOptions{}) - o.Expect(err).NotTo(o.HaveOccurred(), 
fmt.Sprintf("failed to list NetworkPolicies in namespace %s", c.Namespace)) - - // For each required PodSelector, verify that at least one NetworkPolicy has it - for _, requiredSelector := range c.RequiredPodSelectors { - found := false - var matchedNPName string - - for _, np := range npList.Items { - if podSelectorContainsLabels(np.Spec.PodSelector.MatchLabels, requiredSelector) { - found = true - matchedNPName = np.Name - break - } - } - - // Format the required selector for error messages - labelPairs := []string{} - for k, v := range requiredSelector { - labelPairs = append(labelPairs, fmt.Sprintf("%s=%s", k, v)) - } - selectorDesc := strings.Join(labelPairs, ", ") - - if found { - g.By(fmt.Sprintf("Found NetworkPolicy %s/%s with required PodSelector labels: %s", c.Namespace, matchedNPName, selectorDesc)) - } else { - o.Expect(found).To(o.BeTrue(), fmt.Sprintf("No NetworkPolicy in namespace %s has PodSelector with labels: %s", c.Namespace, selectorDesc)) - } - } + verifyNetworkPolicyPodSelectors(oc, c.Namespace, c.RequiredPodSelectors) } }) }) @@ -404,6 +496,70 @@ func runResourceChecks(oc *exutil.CLI, resources []resourceCheck, currentPlatfor } } +// isLSOVersionSupported checks if the LSO version is 4.21.0 or higher +// Supported version formats: "4.21.0", "4.21.0-202511252120" +func isLSOVersionSupported(versionStr string) bool { + // Minimum required version for LSO network policy support + minVersion := semver.MustParse("4.21.0") + + // Parse the LSO version + // The version string may contain build metadata (e.g., "4.21.0-202511252120") + // semver.Parse handles this correctly + version, err := semver.Parse(versionStr) + if err != nil { + e2e.Logf("Failed to parse LSO version %q: %v", versionStr, err) + return false + } + + // Compare versions: returns true if version >= minVersion + return version.GTE(minVersion) +} + +// getLSOInfo detects if LSO is installed by searching for local-storage-operator CSV +// across all namespaces and returns its namespace and version information +func getLSOInfo(oc *exutil.CLI) (*lsoInfo, error) { + info := &lsoInfo{ + Installed: false, + } + + // Create controller-runtime client + clusterConfig := oc.AdminConfig() + clusterClient, err := client.New(clusterConfig, client.Options{}) + if err != nil { + return info, fmt.Errorf("failed to create controller-runtime client: %v", err) + } + + // Add operatorsv1alpha1 to scheme + err = operatorsv1alpha1.AddToScheme(clusterClient.Scheme()) + if err != nil { + return info, fmt.Errorf("failed to add operators.coreos.com/v1alpha1 to scheme: %v", err) + } + + // List all ClusterServiceVersions across all namespaces + csvList := &operatorsv1alpha1.ClusterServiceVersionList{} + err = clusterClient.List(context.TODO(), csvList) + if err != nil { + return info, fmt.Errorf("failed to list ClusterServiceVersions: %v", err) + } + + // Search for local-storage-operator CSV + for _, csv := range csvList.Items { + // Match CSV name pattern: local-storage-operator.* + if strings.HasPrefix(csv.Name, "local-storage-operator") { + // Only consider CSVs in Succeeded phase + if csv.Status.Phase == operatorsv1alpha1.CSVPhaseSucceeded { + info.Installed = true + info.Namespace = csv.Namespace + info.Version = csv.Spec.Version.String() + return info, nil + } + } + } + + // LSO not found or not in Succeeded phase + return info, nil +} + // podSelectorContainsLabels checks if actualLabels contains all key-value pairs from requiredLabels func podSelectorContainsLabels(actualLabels map[string]string, requiredLabels 
map[string]string) bool { for key, value := range requiredLabels { @@ -413,3 +569,37 @@ func podSelectorContainsLabels(actualLabels map[string]string, requiredLabels ma } return true } + +// verifyNetworkPolicyPodSelectors verifies that for each required PodSelector, at least one NetworkPolicy has it +func verifyNetworkPolicyPodSelectors(oc *exutil.CLI, namespace string, requiredPodSelectors []map[string]string) { + // List all NetworkPolicies in the namespace + npList, err := oc.AdminKubeClient().NetworkingV1().NetworkPolicies(namespace).List(context.TODO(), metav1.ListOptions{}) + o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("failed to list NetworkPolicies in namespace %s", namespace)) + + // For each required PodSelector, verify that at least one NetworkPolicy has it + for _, requiredSelector := range requiredPodSelectors { + found := false + var matchedNPName string + + for _, np := range npList.Items { + if podSelectorContainsLabels(np.Spec.PodSelector.MatchLabels, requiredSelector) { + found = true + matchedNPName = np.Name + break + } + } + + // Format the required selector for error messages + labelPairs := []string{} + for k, v := range requiredSelector { + labelPairs = append(labelPairs, fmt.Sprintf("%s=%s", k, v)) + } + selectorDesc := strings.Join(labelPairs, ", ") + + if found { + g.By(fmt.Sprintf("Found NetworkPolicy %s/%s with required PodSelector labels: %s", namespace, matchedNPName, selectorDesc)) + } else { + o.Expect(found).To(o.BeTrue(), fmt.Sprintf("No NetworkPolicy in namespace %s has PodSelector with labels: %s", namespace, selectorDesc)) + } + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go index 3bb4fd7c4e0a..48bd362bf53f 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go +++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -17,9 +17,9 @@ ANTLR4 that it is compatible with (I.E. uses the /v4 path). However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not -list the release tag such as @4.12.0 - this was confusing, to say the least. +list the release tag such as @4.13.1 - this was confusing, to say the least. -As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` (the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. @@ -49,7 +49,7 @@ Here is a general/recommended template for an ANTLR based recognizer in Go: . 
├── parser │ ├── mygrammar.g4 - │ ├── antlr-4.12.1-complete.jar + │ ├── antlr-4.13.1-complete.jar │ ├── generate.go │ └── generate.sh ├── parsing - generated code goes here @@ -71,7 +71,7 @@ And the generate.sh file will look similar to this: #!/bin/sh - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go index cdeefed24789..e749ebd0cfda 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/atn.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -4,8 +4,6 @@ package antlr -import "sync" - // ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or // which is invalid for a particular struct such as [*antlr.BaseRuleContext] var ATNInvalidAltNumber int @@ -56,9 +54,9 @@ type ATN struct { // states []ATNState - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex + mu Mutex + stateMu RWMutex + edgeMu RWMutex } // NewATN returns a new ATN struct representing the given grammarType and is used diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go index a83f25d3492e..267308bb3d00 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -73,9 +73,6 @@ func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *AT // NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' // are just wrappers around this one. func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed - } b := &ATNConfig{} b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) b.cType = parserConfig diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go index b737fe85fbd0..ab4e96be524a 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -148,7 +148,7 @@ func (is *InputStream) GetTextFromInterval(i Interval) string { } func (*InputStream) GetSourceName() string { - return "" + return "Obtained from string" } // String returns the entire input stream as a string diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go index ceccd96d2584..6d668f798360 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go +++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -8,7 +8,6 @@ import ( "container/list" "runtime/debug" "sort" - "sync" ) // Collectable is an interface that a struct should implement if it is to be @@ -587,12 +586,12 @@ type VisitRecord struct { type VisitList struct { cache *list.List - lock sync.RWMutex + lock RWMutex } var visitListPool = VisitList{ cache: list.New(), - lock: sync.RWMutex{}, + lock: RWMutex{}, } // NewVisitRecord returns a new VisitRecord instance from the pool if available. 
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go index 3c7896a9183d..e5594b2168a1 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/lexer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -207,7 +207,7 @@ func (b *BaseLexer) NextToken() Token { for { b.thetype = TokenInvalidType - ttype := b.safeMatch() + ttype := b.safeMatch() // Defaults to LexerSkip if b.input.LA(1) == TokenEOF { b.hitEOF = true diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go index 4955ac876f97..dfdff000bc24 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -40,6 +40,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { for alt := 0; alt < count; alt++ { look[alt] = NewIntervalSet() + // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim! lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex.go new file mode 100644 index 000000000000..2b0cda4745ac --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex.go @@ -0,0 +1,41 @@ +//go:build !antlr.nomutex +// +build !antlr.nomutex + +package antlr + +import "sync" + +// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it +// is used to provide a mutex implementation for the antlr package, which users +// can turn off with the build tag -tags antlr.nomutex +type Mutex struct { + mu sync.Mutex +} + +func (m *Mutex) Lock() { + m.mu.Lock() +} + +func (m *Mutex) Unlock() { + m.mu.Unlock() +} + +type RWMutex struct { + mu sync.RWMutex +} + +func (m *RWMutex) Lock() { + m.mu.Lock() +} + +func (m *RWMutex) Unlock() { + m.mu.Unlock() +} + +func (m *RWMutex) RLock() { + m.mu.RLock() +} + +func (m *RWMutex) RUnlock() { + m.mu.RUnlock() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go new file mode 100644 index 000000000000..35ce4353ee21 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go @@ -0,0 +1,32 @@ +//go:build antlr.nomutex +// +build antlr.nomutex + +package antlr + +type Mutex struct{} + +func (m *Mutex) Lock() { + // No-op +} + +func (m *Mutex) Unlock() { + // No-op +} + +type RWMutex struct{} + +func (m *RWMutex) Lock() { + // No-op +} + +func (m *RWMutex) Unlock() { + // No-op +} + +func (m *RWMutex) RLock() { + // No-op +} + +func (m *RWMutex) RUnlock() { + // No-op +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go index ae2869692a67..724fa17a1994 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -10,8 +10,6 @@ import ( "strings" ) -var () - // ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over // a standard JStore so that we can use Lazy instantiation of the JStore, mostly // to avoid polluting the stats module with a ton of JStore instances with nothing in them. 
@@ -883,7 +881,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // the ERROR state was reached, outerContext as the initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// Teh func returns the value to return from [AdaptivePredict], or +// The func returns the value to return from [AdaptivePredict], or // [ATNInvalidAltNumber] if a suitable alternative was not // identified and [AdaptivePredict] should report an error instead. func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go index c1b80cc1f0fd..a1d5186b8f11 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -6,7 +6,6 @@ package antlr import ( "fmt" - "golang.org/x/exp/slices" "strconv" ) @@ -101,7 +100,7 @@ func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) hash = murmurUpdate(hash, returnState) } hash = murmurFinish(hash, len(parents)<<1) - + nec := &PredictionContext{} nec.cachedHash = hash nec.pcType = PredictionContextArray @@ -115,6 +114,9 @@ func (p *PredictionContext) Hash() int { } func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + if p == other { + return true + } switch p.pcType { case PredictionContextEmpty: otherP := other.(*PredictionContext) @@ -138,13 +140,11 @@ func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool if p.cachedHash != other.Hash() { return false // can't be same if hash is different } - + // Must compare the actual array elements and not just the array address // - return slices.Equal(p.returnStates, other.returnStates) && - slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { - return x.Equals(y) - }) + return intSlicesEqual(p.returnStates, other.returnStates) && + pcSliceEqual(p.parents, other.parents) } func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { @@ -152,23 +152,23 @@ func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext return false } otherP := other.(*PredictionContext) - if otherP == nil { + if otherP == nil || otherP.pcType != PredictionContextSingleton { return false } - + if p.cachedHash != otherP.Hash() { return false // Can't be same if hash is different } - + if p.returnState != otherP.getReturnState(0) { return false } - + // Both parents must be nil if one is if p.parentCtx == nil { return otherP.parentCtx == nil } - + return p.parentCtx.Equals(otherP.parentCtx) } @@ -225,27 +225,27 @@ func (p *PredictionContext) String() string { return "$" case PredictionContextSingleton: var up string - + if p.parentCtx == nil { up = "" } else { up = p.parentCtx.String() } - + if len(up) == 0 { if p.returnState == BasePredictionContextEmptyReturnState { return "$" } - + return strconv.Itoa(p.returnState) } - + return strconv.Itoa(p.returnState) + " " + up case PredictionContextArray: if p.isEmpty() { return "[]" } - + s := "[" for i := 0; i < len(p.returnStates); i++ { if i > 0 { @@ -263,7 +263,7 @@ func (p *PredictionContext) String() string { } } return s + "]" - + default: return "unknown" } @@ -309,18 +309,18 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *Predict parent := 
predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) state := a.states[outerContext.GetInvokingState()] transition := state.GetTransitions()[0] - + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) } func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { - + // Share same graph if both same // if a == b || a.Equals(b) { return a } - + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { return mergeSingletons(a, b, rootIsWildcard, mergeCache) } @@ -334,7 +334,7 @@ func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *Pr return b } } - + // Convert either Singleton or Empty to arrays, so that we can merge them // ara := convertToArray(a) @@ -395,7 +395,7 @@ func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *J return previous } } - + rootMerge := mergeRoot(a, b, rootIsWildcard) if rootMerge != nil { if mergeCache != nil { @@ -564,7 +564,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa i := 0 // walks a j := 0 // walks b k := 0 // walks target M array - + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) // walk and merge to yield mergedParents, mergedReturnStates @@ -626,9 +626,9 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa mergedParents = mergedParents[0:k] mergedReturnStates = mergedReturnStates[0:k] } - + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - + // if we created same array as a or b, return that instead // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation if M.Equals(a) { @@ -650,7 +650,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa return b } combineCommonParents(&mergedParents) - + if mergeCache != nil { mergeCache.Put(a, b, M) } @@ -666,7 +666,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa //goland:noinspection GoUnusedFunction func combineCommonParents(parents *[]*PredictionContext) { uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") - + for p := 0; p < len(*parents); p++ { parent := (*parents)[p] _, _ = uniqueParents.Put(parent) @@ -685,7 +685,7 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr if present { return existing } - + existing, present = contextCache.Get(context) if present { visited.Put(context, existing) @@ -722,6 +722,6 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr contextCache.add(updated) visited.Put(updated, updated) visited.Put(context, updated) - + return updated } diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go index 2e0b504fb3db..dcb8548cd175 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -56,7 +56,7 @@ var tokenTypeMapCache = make(map[string]int) var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" + runtimeVersion := "4.13.1" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and 
generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) } } diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go index 70c0673a0f6b..8cb5f3ed6f63 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/statistics.go +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sort" "strconv" - "sync" ) // This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default @@ -30,7 +29,7 @@ type goRunStats struct { // within this package. // jStats []*JStatRec - jStatsLock sync.RWMutex + jStatsLock RWMutex topN int topNByMax []*JStatRec topNByUsed []*JStatRec diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go index 9670efb829e4..f5bc34229df1 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/token.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -104,6 +104,25 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } +func (b *BaseToken) GetText() string { + if b.text != "" { + return b.text + } + input := b.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if b.GetStart() < n && b.GetStop() < n { + return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) + } + return "" +} + +func (b *BaseToken) SetText(text string) { + b.text = text +} + func (b *BaseToken) GetTokenIndex() int { return b.tokenIndex } @@ -120,6 +139,28 @@ func (b *BaseToken) GetInputStream() CharStream { return b.source.charStream } +func (b *BaseToken) String() string { + txt := b.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "<no text>" + } + + var ch string + if b.GetChannel() > 0 { + ch = ",channel=" + strconv.Itoa(b.GetChannel()) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" + + txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" + + ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]" +} + type CommonToken struct { BaseToken } @@ -170,44 +211,3 @@ func (c *CommonToken) clone() *CommonToken { t.text = c.GetText() return t } - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "<no text>" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go index 733d7df9dc74..36a37f247acb 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/utils.go +++ 
b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -326,3 +326,56 @@ func isDirectory(dir string) (bool, error) { } return fileInfo.IsDir(), err } + +// intSlicesEqual returns true if the two slices of ints are equal, and is a little +// faster than slices.Equal. +func intSlicesEqual(s1, s2 []int) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if v != s2[i] { + return false + } + } + return true +} + +func pcSliceEqual(s1, s2 []*PredictionContext) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if !v.Equals(s2[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/cenkalti/backoff/v5/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore new file mode 100644 index 000000000000..50d95c548b67 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 000000000000..658c37436d9c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. 
(#140) diff --git a/vendor/github.com/cenkalti/backoff/v5/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE new file mode 100644 index 000000000000..89b817996558 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v5/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md new file mode 100644 index 000000000000..4611b1d1701a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -0,0 +1,31 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. + +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v5/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go new file mode 100644 index 000000000000..dd2b24ca7350 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. 
+// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 000000000000..beb2b38a23d7 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
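Editorially, the BackOff contract and error helpers above are easiest to see in use. A minimal consumption sketch, assuming only the vendored `github.com/cenkalti/backoff/v5` identifiers visible in this diff (NewConstantBackOff, NextBackOff, Stop); the operation here is hypothetical:

```go
// Manual retry loop driven by the BackOff interface documented above.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	b := backoff.NewConstantBackOff(100 * time.Millisecond)
	b.Reset()

	attempt := 0
	op := func() error { // hypothetical flaky operation
		attempt++
		if attempt < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	for {
		if err := op(); err == nil {
			fmt.Println("succeeded after", attempt, "attempts")
			return
		}
		d := b.NextBackOff()
		if d == backoff.Stop { // ConstantBackOff never stops; shown for the general contract
			fmt.Println("giving up")
			return
		}
		time.Sleep(d)
	}
}
```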
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 000000000000..c1f3e442d364 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,125 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. 
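To make the interval table above concrete, a small sketch against the vendored v5 API, with RandomizationFactor fixed to 0 so the printout is deterministic (this disables the jitter the docs describe):

```go
// Deterministic view of ExponentialBackOff growth: with
// RandomizationFactor = 0 the intervals are exactly
// InitialInterval * Multiplier^n, capped at MaxInterval.
package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.RandomizationFactor = 0 // no jitter, for a reproducible sequence
	b.Reset()

	for i := 0; i < 10; i++ {
		fmt.Println(b.NextBackOff()) // 500ms, 750ms, 1.125s, ... capped at 60s
	}
}
```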
+func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 000000000000..e43f47fb8a55 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. 
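A usage sketch against the generic Retry signature that follows below; the fetch operation and its values are hypothetical, but the options (WithMaxTries, WithNotify) are the ones defined in this file:

```go
// Retrying a flaky operation: the operation returns (T, error), attempts
// are capped at five, and each backoff wait is logged via Notify.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ctx := context.Background()

	attempts := 0
	fetch := func() (string, error) { // hypothetical flaky operation
		attempts++
		if attempts < 3 {
			return "", errors.New("temporarily unavailable")
		}
		return "payload", nil
	}

	res, err := backoff.Retry(ctx, fetch,
		backoff.WithMaxTries(5),
		backoff.WithNotify(func(err error, d time.Duration) {
			fmt.Printf("retrying in %s after error: %v\n", d, err)
		}),
	)
	if err != nil {
		fmt.Println("failed:", err)
		return
	}
	fmt.Println("got:", res) // got: payload
}
```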
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. + args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v5/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go new file mode 100644 index 000000000000..f0d4b2ae7213 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -0,0 +1,83 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + timer timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + timer: &defaultTimer{}, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v5/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go new file mode 100644 index 000000000000..a895309747da --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index bb3014464e52..58819e872a28 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -27,6 +27,7 @@ import ( "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/env" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/stdlib" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -142,6 +143,9 @@ type Env struct { validators []ASTValidator costOptions []checker.CostOption + funcBindOnce sync.Once + functionBindings []*functions.Overload + // Internal parser representation prsr *parser.Parser prsrOpts []parser.Option @@ -320,18 +324,19 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { return nil, err } return (&Env{ - variables: []*decls.VariableDecl{}, - functions: map[string]*decls.FunctionDecl{}, - macros: []parser.Macro{}, - Container: containers.DefaultContainer, - adapter: registry, - provider: registry, - features: map[int]bool{}, - appliedFeatures: map[int]bool{}, - libraries: map[string]SingletonLibrary{}, - validators: []ASTValidator{}, - progOpts: []ProgramOption{}, - costOptions: []checker.CostOption{}, + variables: []*decls.VariableDecl{}, + functions: map[string]*decls.FunctionDecl{}, + functionBindings: []*functions.Overload{}, + macros: []parser.Macro{}, + Container: containers.DefaultContainer, + adapter: registry, + provider: registry, + features: map[int]bool{}, + appliedFeatures: map[int]bool{}, + libraries: map[string]SingletonLibrary{}, + validators: []ASTValidator{}, + progOpts: []ProgramOption{}, + costOptions: []checker.CostOption{}, }).configure(opts) } diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go index 40d843ecea30..d1ea6b19dbe1 100644 --- a/vendor/github.com/google/cel-go/cel/folding.go +++ b/vendor/github.com/google/cel-go/cel/folding.go @@ -38,7 +38,7 @@ func MaxConstantFoldIterations(limit int) ConstantFoldingOption { } } -// Adds an Activation which provides known values for the folding evaluator +// FoldKnownValues adds an Activation 
which provides known values for the folding evaluator // // Any values the activation provides will be used by the constant folder and turned into // literals in the AST. diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index 24f41a4a77ee..ec3869bdb4a3 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -20,6 +20,7 @@ import ( "sync" "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -191,16 +192,25 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { } } - // Add the function bindings created via Function() options. - for _, fn := range e.functions { - bindings, err := fn.Bindings() - if err != nil { - return nil, err - } - err = disp.Add(bindings...) - if err != nil { - return nil, err + e.funcBindOnce.Do(func() { + var bindings []*functions.Overload + e.functionBindings = []*functions.Overload{} + for _, fn := range e.functions { + bindings, err = fn.Bindings() + if err != nil { + return + } + e.functionBindings = append(e.functionBindings, bindings...) } + }) + if err != nil { + return nil, err + } + + // Add the function bindings created via Function() options. + err = disp.Add(e.functionBindings...) + if err != nil { + return nil, err } // Set the attribute factory after the options have been set. diff --git a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl index d6b3da5c6c0b..d0b0133f151f 100644 --- a/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl +++ b/vendor/github.com/google/cel-go/cel/templates/authoring.tmpl @@ -1,4 +1,8 @@ -{{define "variable"}}{{.Name}} is a {{.Type}} +{{define "variable"}}{{.Name}} is a {{.Type}}{{if .Description}} + +{{range split .Description}} {{.}} +{{end}} +{{- end -}} {{- end -}} {{define "macro" -}} diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go index 5f06b2dd55f3..952f88f41b8b 100644 --- a/vendor/github.com/google/cel-go/cel/validator.go +++ b/vendor/github.com/google/cel-go/cel/validator.go @@ -45,6 +45,14 @@ var ( astValidatorFactories = map[string]ASTValidatorFactory{ nestingLimitValidatorName: func(val *env.Validator) (ASTValidator, error) { if limit, found := val.ConfigValue("limit"); found { + // In case of protos, config value is of type by google.protobuf.Value, which numeric values are always a double. 
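A one-file illustration of why the double-vs-int check that follows is needed. This is a generic sketch, not cel-go code: numbers decoded from JSON or proto-style dynamic values surface in Go as float64, so an integral limit must be verified as a whole number before narrowing:

```go
// Generic illustration: dynamically decoded numbers arrive as float64.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var cfg map[string]any
	_ = json.Unmarshal([]byte(`{"limit": 3}`), &cfg)

	if f, ok := cfg["limit"].(float64); ok {
		fmt.Println(f == float64(int64(f))) // true: safe to convert with int(f)
	}
}
```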
+ if val, isDouble := limit.(float64); isDouble { + if val != float64(int64(val)) { + return nil, fmt.Errorf("invalid validator: %s, limit value is not a whole number: %v", nestingLimitValidatorName, limit) + } + return ValidateComprehensionNestingLimit(int(val)), nil + } + if val, isInt := limit.(int); isInt { return ValidateComprehensionNestingLimit(val), nil } diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index bdd474c95af9..171494f075a9 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -472,7 +472,7 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { } return v.GetValue(), true, nil } - return msg, false, nil + return unwrapDynamic(desc, msg.ProtoReflect()) } // unwrapDynamic unwraps a reflected protobuf Message value. diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index 661984cbb127..ceaa274b740b 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -609,7 +609,8 @@ func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflec var iterateStructMembers func(reflect.Type) iterateStructMembers = func(t reflect.Type) { if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { - t = t.Elem() + iterateStructMembers(t.Elem()) + return } if t.Kind() != reflect.Struct { return diff --git a/vendor/github.com/operator-framework/api/LICENSE b/vendor/github.com/operator-framework/api/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/operator-framework/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go new file mode 100644 index 000000000000..a0ffb9fcbe0a --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go @@ -0,0 +1,67 @@ +package version + +import ( + "encoding/json" + + semver "github.com/blang/semver/v4" +) + +// +k8s:openapi-gen=true +// OperatorVersion is a wrapper around semver.Version which supports correct +// marshaling to YAML and JSON. +// +kubebuilder:validation:Type=string +type OperatorVersion struct { + semver.Version `json:"-"` +} + +// DeepCopyInto creates a deep-copy of the Version value. 
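Given that OperatorVersion exists to marshal as a plain semver string (see the MarshalJSON/UnmarshalJSON methods below), a round-trip sketch using only the vendored packages added in this diff:

```go
// Round-trip: OperatorVersion parses tolerantly from a semver string and
// marshals back to one.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/operator-framework/api/pkg/lib/version"
)

func main() {
	var v version.OperatorVersion
	if err := json.Unmarshal([]byte(`"1.2.3-beta.1+build.7"`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Major, v.Minor, v.Patch) // 1 2 3

	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // "1.2.3-beta.1+build.7"
}
```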
+func (v *OperatorVersion) DeepCopyInto(out *OperatorVersion) { + out.Major = v.Major + out.Minor = v.Minor + out.Patch = v.Patch + + if v.Pre != nil { + pre := make([]semver.PRVersion, len(v.Pre)) + copy(pre, v.Pre) + out.Pre = pre + } + + if v.Build != nil { + build := make([]string, len(v.Build)) + copy(build, v.Build) + out.Build = build + } +} + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v OperatorVersion) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *OperatorVersion) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + version := semver.Version{} + version, err = semver.ParseTolerant(versionString) + if err != nil { + return err + } + v.Version = version + return +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ OperatorVersion) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// "semver" is not a standard openapi format but tooling may use the value regardless +func (_ OperatorVersion) OpenAPISchemaFormat() string { return "semver" } diff --git a/vendor/github.com/operator-framework/api/pkg/operators/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/doc.go new file mode 100644 index 000000000000..7eba794488ba --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/doc.go @@ -0,0 +1,4 @@ +// +kubebuilder:skip + +// Package operators contains all resource types of the operators.coreos.com API group. +package operators diff --git a/vendor/github.com/operator-framework/api/pkg/operators/register.go b/vendor/github.com/operator-framework/api/pkg/operators/register.go new file mode 100644 index 000000000000..e3c31d51ac22 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/register.go @@ -0,0 +1,31 @@ +package operators + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // GroupName is the group name used in this package. + GroupName = "operators.coreos.com" + // GroupVersion is the group version used in this package. + GroupVersion = runtime.APIVersionInternal + + // LEGACY: Exported kind names, remove after major version bump + + // ClusterServiceVersionKind is the kind name for ClusterServiceVersion resources. + ClusterServiceVersionKind = "ClusterServiceVersion" + // CatalogSourceKind is the kind name for CatalogSource resources. + CatalogSourceKind = "CatalogSource" + // InstallPlanKind is the kind name for InstallPlan resources. + InstallPlanKind = "InstallPlan" + // SubscriptionKind is the kind name for Subscription resources. + SubscriptionKind = "Subscription" + // OperatorKind is the kind name for Operator resources. + OperatorKind = "Operator" + // OperatorGroupKind is the kind name for OperatorGroup resources. 
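These kind constants pair with SchemeGroupVersion (declared at the end of this file) to form GroupVersionKinds; a minimal sketch:

```go
// Deriving a GroupVersionKind from the package's group constants.
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators"
)

func main() {
	gvk := operators.SchemeGroupVersion.WithKind(operators.ClusterServiceVersionKind)
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind)
	// operators.coreos.com __internal ClusterServiceVersion
}
```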
+ OperatorGroupKind = "OperatorGroup" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go new file mode 100644 index 000000000000..8386b2032041 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go @@ -0,0 +1,364 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + CatalogSourceCRDAPIVersion = GroupName + "/" + GroupVersion + CatalogSourceKind = "CatalogSource" + DefaultRegistryPollDuration = 15 * time.Minute +) + +// SourceType indicates the type of backing store for a CatalogSource +type SourceType string + +const ( + // SourceTypeInternal (deprecated) specifies a CatalogSource of type SourceTypeConfigmap + SourceTypeInternal SourceType = "internal" + + // SourceTypeConfigmap specifies a CatalogSource that generates a configmap-server registry + SourceTypeConfigmap SourceType = "configmap" + + // SourceTypeGrpc specifies a CatalogSource that can use an operator registry image to generate a + // registry-server or connect to a pre-existing registry at an address. + SourceTypeGrpc SourceType = "grpc" +) + +const ( + // CatalogSourceSpecInvalidError denotes when fields on the spec of the CatalogSource are not valid. + CatalogSourceSpecInvalidError ConditionReason = "SpecInvalidError" + // CatalogSourceConfigMapError denotes when there is an issue extracting manifests from the specified ConfigMap. + CatalogSourceConfigMapError ConditionReason = "ConfigMapError" + // CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server. + CatalogSourceRegistryServerError ConditionReason = "RegistryServerError" + // CatalogSourceIntervalInvalidError denotes if the registry polling interval is invalid. + CatalogSourceIntervalInvalidError ConditionReason = "InvalidIntervalError" +) + +type CatalogSourceSpec struct { + // SourceType is the type of source + SourceType SourceType `json:"sourceType"` + + // Priority field assigns a weight to the catalog source to prioritize them so that it can be consumed by the dependency resolver. + // Usage: + // Higher weight indicates that this catalog source is preferred over lower weighted catalog sources during dependency resolution. + // The range of the priority value can go from positive to negative in the range of int32. + // The default value to a catalog source with unassigned priority would be 0. + // The catalog source with the same priority values will be ranked lexicographically based on its name. + // +optional + Priority int `json:"priority,omitempty"` + + // ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry. + // Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal. + // +optional + ConfigMap string `json:"configMap,omitempty"` + + // Address is a host that OLM can use to connect to a pre-existing registry. + // Format: : + // Only used when SourceType = SourceTypeGrpc. + // Ignored when the Image field is set. 
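A minimal Go sketch of a grpc-type CatalogSource built from the spec fields defined in this file; the name, namespace, and index image below are made up for illustration:

```go
// A grpc CatalogSource backed by an operator-registry index image.
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cs := v1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{Name: "example-catalog", Namespace: "olm"},
		Spec: v1alpha1.CatalogSourceSpec{
			SourceType:  v1alpha1.SourceTypeGrpc,
			Image:       "quay.io/example/catalog-index:latest", // hypothetical
			DisplayName: "Example Catalog",
			Publisher:   "example.com",
		},
	}
	fmt.Println(cs.Name, cs.Spec.SourceType) // example-catalog grpc
}
```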
+ // +optional + Address string `json:"address,omitempty"` + + // Image is an operator-registry container image to instantiate a registry-server with. + // Only used when SourceType = SourceTypeGrpc. + // If present, the address field is ignored. + // +optional + Image string `json:"image,omitempty"` + + // GrpcPodConfig exposes different overrides for the pod spec of the CatalogSource Pod. + // Only used when SourceType = SourceTypeGrpc and Image is set. + // +optional + GrpcPodConfig *GrpcPodConfig `json:"grpcPodConfig,omitempty"` + + // UpdateStrategy defines how updated catalog source images can be discovered + // Consists of an interval that defines polling duration and an embedded strategy type + // +optional + UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"` + + // Secrets represent set of secrets that can be used to access the contents of the catalog. + // It is best to keep this list small, since each will need to be tried for every catalog entry. + // +optional + Secrets []string `json:"secrets,omitempty"` + + // Metadata + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + Publisher string `json:"publisher,omitempty"` + Icon Icon `json:"icon,omitempty"` +} + +type SecurityConfig string + +const ( + Legacy SecurityConfig = "legacy" + Restricted SecurityConfig = "restricted" +) + +// GrpcPodConfig contains configuration specified for a catalog source +type GrpcPodConfig struct { + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations are the catalog source's pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Affinity is the catalog source's pod's affinity. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // If specified, indicates the pod's priority. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` + + // SecurityContextConfig can be one of `legacy` or `restricted`. The CatalogSource's pod is either injected with the + // right pod.spec.securityContext and pod.spec.container[*].securityContext values to allow the pod to run in Pod + // Security Admission (PSA) `restricted` mode, or doesn't set these values at all, in which case the pod can only be + // run in PSA `baseline` or `privileged` namespaces. If the SecurityContextConfig is unspecified, the mode will be + // determined by the namespace's PSA configuration. If the namespace is enforcing `restricted` mode, then the pod + // will be configured as if `restricted` was specified. Otherwise, it will be configured as if `legacy` was + // specified. Specifying a value other than `legacy` or `restricted` result in a validation error. When using older + // catalog images, which can not run in `restricted` mode, the SecurityContextConfig should be set to `legacy`. + // + // More information about PSA can be found here: https://kubernetes.io/docs/concepts/security/pod-security-admission/ + // +optional + // +kubebuilder:validation:Enum=legacy;restricted + SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"` + + // MemoryTarget configures the $GOMEMLIMIT value for the gRPC catalog Pod. 
This is a soft memory limit for the server, + // which the runtime will attempt to meet but makes no guarantees that it will do so. If this value is set, the Pod + // will have the following modifications made to the container running the server: + // - the $GOMEMLIMIT environment variable will be set to this value in bytes + // - the memory request will be set to this value + // + // This field should be set if it's desired to reduce the footprint of a catalog server as much as possible, or if + // a catalog being served is very large and needs more than the default allocation. If your index image has a file- + // system cache, determine a good approximation for this value by doubling the size of the package cache at + // /tmp/cache/cache/packages.json in the index image. + // + // This field is best-effort; if unset, no default will be used and no Pod memory limit or $GOMEMLIMIT value will be set. + // +optional + MemoryTarget *resource.Quantity `json:"memoryTarget,omitempty"` + + // ExtractContent configures the gRPC catalog Pod to extract catalog metadata from the provided index image and + // use a well-known version of the `opm` server to expose it. The catalog index image that this CatalogSource is + // configured to use *must* be using the file-based catalogs in order to utilize this feature. + // +optional + ExtractContent *ExtractContentConfig `json:"extractContent,omitempty"` +} + +// ExtractContentConfig configures context extraction from a file-based catalog index image. +type ExtractContentConfig struct { + // CacheDir is the (optional) directory storing the pre-calculated API cache. + CacheDir string `json:"cacheDir,omitempty"` + // CatalogDir is the directory storing the file-based catalog contents. + CatalogDir string `json:"catalogDir"` +} + +// UpdateStrategy holds all the different types of catalog source update strategies +// Currently only registry polling strategy is implemented +type UpdateStrategy struct { + *RegistryPoll `json:"registryPoll,omitempty"` +} + +type RegistryPoll struct { + // Interval is used to determine the time interval between checks of the latest catalog source version. + // The catalog operator polls to see if a new version of the catalog source is available. + // If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source. + RawInterval string `json:"interval,omitempty"` + Interval *metav1.Duration `json:"-"` + ParsingError string `json:"-"` +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (u *UpdateStrategy) UnmarshalJSON(data []byte) (err error) { + type alias struct { + *RegistryPoll `json:"registryPoll,omitempty"` + } + us := alias{} + if err = json.Unmarshal(data, &us); err != nil { + return err + } + registryPoll := &RegistryPoll{ + RawInterval: us.RegistryPoll.RawInterval, + } + duration, err := time.ParseDuration(registryPoll.RawInterval) + if err != nil { + registryPoll.ParsingError = fmt.Sprintf("error parsing spec.updateStrategy.registryPoll.interval. Using the default value of %s instead. 
Error: %s", DefaultRegistryPollDuration, err) + registryPoll.Interval = &metav1.Duration{Duration: DefaultRegistryPollDuration} + } else { + registryPoll.Interval = &metav1.Duration{Duration: duration} + } + u.RegistryPoll = registryPoll + return nil +} + +type RegistryServiceStatus struct { + Protocol string `json:"protocol,omitempty"` + ServiceName string `json:"serviceName,omitempty"` + ServiceNamespace string `json:"serviceNamespace,omitempty"` + Port string `json:"port,omitempty"` + CreatedAt metav1.Time `json:"createdAt,omitempty"` +} + +func (s *RegistryServiceStatus) Address() string { + return fmt.Sprintf("%s.%s.svc:%s", s.ServiceName, s.ServiceNamespace, s.Port) +} + +type GRPCConnectionState struct { + Address string `json:"address,omitempty"` + LastObservedState string `json:"lastObservedState"` + LastConnectTime metav1.Time `json:"lastConnect,omitempty"` +} + +type CatalogSourceStatus struct { + // A human readable message indicating details about why the CatalogSource is in this condition. + // +optional + Message string `json:"message,omitempty"` + // Reason is the reason the CatalogSource was transitioned to its current state. + // +optional + Reason ConditionReason `json:"reason,omitempty"` + + // The last time the CatalogSource image registry has been polled to ensure the image is up-to-date + LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"` + + // ConfigMapReference (deprecated) is the reference to the ConfigMap containing the catalog source's configuration, when the catalog source is a ConfigMap + ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"` + // RegistryService represents the current state of the GRPC service used to serve the catalog + RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"` + // ConnectionState represents the current state of the CatalogSource's connection to the registry + GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"` + + // Represents the state of a CatalogSource. Note that Message and Reason represent the original + // status information, which may be migrated to be conditions based in the future. Any new features + // introduced will use conditions. 
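Stepping back to the UpdateStrategy unmarshaller above, a sketch of its fallback behaviour, assuming the vendored v1alpha1 package: a parseable interval is kept, while an unparseable one records a ParsingError and falls back to the 15-minute default:

```go
// UpdateStrategy interval parsing: valid value kept, invalid value
// replaced with DefaultRegistryPollDuration.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	var ok v1alpha1.UpdateStrategy
	_ = json.Unmarshal([]byte(`{"registryPoll": {"interval": "45m"}}`), &ok)
	fmt.Println(ok.RegistryPoll.Interval.Duration) // 45m0s

	var bad v1alpha1.UpdateStrategy
	_ = json.Unmarshal([]byte(`{"registryPoll": {"interval": "often"}}`), &bad)
	fmt.Println(bad.RegistryPoll.Interval.Duration)  // 15m0s (default)
	fmt.Println(bad.RegistryPoll.ParsingError != "") // true
}
```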
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +type ConfigMapResourceReference struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + UID types.UID `json:"uid,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` +} + +func (r *ConfigMapResourceReference) IsAMatch(object *metav1.ObjectMeta) bool { + return r.UID == object.GetUID() && r.ResourceVersion == object.GetResourceVersion() +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +kubebuilder:resource:shortName=catsrc,categories=olm +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The pretty name of the catalog" +// +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.sourceType`,description="The type of the catalog" +// +kubebuilder:printcolumn:name="Publisher",type=string,JSONPath=`.spec.publisher`,description="The publisher of the catalog" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// CatalogSource is a repository of CSVs, CRDs, and operator packages. +type CatalogSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec CatalogSourceSpec `json:"spec"` + // +optional + Status CatalogSourceStatus `json:"status"` +} + +func (c *CatalogSource) Address() string { + if c.Spec.Address != "" { + return c.Spec.Address + } + return c.Status.RegistryServiceStatus.Address() +} + +func (c *CatalogSource) SetError(reason ConditionReason, err error) { + c.Status.Reason = reason + c.Status.Message = "" + if err != nil { + c.Status.Message = err.Error() + } +} + +func (c *CatalogSource) SetLastUpdateTime() { + now := metav1.Now() + c.Status.LatestImageRegistryPoll = &now +} + +// Check if it is time to update based on polling setting +func (c *CatalogSource) Update() bool { + if !c.Poll() { + return false + } + interval := c.Spec.UpdateStrategy.Interval.Duration + latest := c.Status.LatestImageRegistryPoll + if latest == nil { + logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", latest) + } else { + logrus.WithField("CatalogSource", c.Name).Debugf("latest poll %v", *c.Status.LatestImageRegistryPoll) + } + + if c.Status.LatestImageRegistryPoll.IsZero() { + logrus.WithField("CatalogSource", c.Name).Debugf("creation timestamp plus interval before now %t", c.CreationTimestamp.Add(interval).Before(time.Now())) + if c.CreationTimestamp.Add(interval).Before(time.Now()) { + return true + } + } else { + logrus.WithField("CatalogSource", c.Name).Debugf("latest poll plus interval before now %t", c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now())) + if c.Status.LatestImageRegistryPoll.Add(interval).Before(time.Now()) { + return true + } + } + + return false +} + +// Poll determines whether the polling feature is enabled on the particular catalog source +func (c *CatalogSource) Poll() bool { + if c.Spec.UpdateStrategy == nil { + return false + } + // if polling interval is zero polling will not be done + if c.Spec.UpdateStrategy.RegistryPoll == nil { + return false + } + // if catalog source is not backed by an image polling will not be done + if c.Spec.Image == "" { + return false + } + // if image 
is not type gRPC polling will not be done + if c.Spec.SourceType != SourceTypeGrpc { + return false + } + return true +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CatalogSourceList is a repository of CSVs, CRDs, and operator packages. +type CatalogSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CatalogSource `json:"items"` +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go new file mode 100644 index 000000000000..a4c8d1746960 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go @@ -0,0 +1,215 @@ +package v1alpha1 + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" +) + +const ( + CopiedLabelKey = "olm.copiedFrom" + + // ConditionsLengthLimit is the maximum length of Status.Conditions of a + // given ClusterServiceVersion object. The oldest condition(s) are removed + // from the list as it grows over time to keep it at limit. + ConditionsLengthLimit = 20 +) + +// obsoleteReasons are the set of reasons that mean a CSV should no longer be processed as active +var obsoleteReasons = map[ConditionReason]struct{}{ + CSVReasonReplaced: {}, + CSVReasonBeingReplaced: {}, +} + +// uncopiableReasons are the set of reasons that should prevent a CSV from being copied to target namespaces +var uncopiableReasons = map[ConditionReason]struct{}{ + CSVReasonCopied: {}, + CSVReasonInvalidInstallModes: {}, + CSVReasonNoTargetNamespaces: {}, + CSVReasonUnsupportedOperatorGroup: {}, + CSVReasonNoOperatorGroup: {}, + CSVReasonTooManyOperatorGroups: {}, + CSVReasonInterOperatorGroupOwnerConflict: {}, + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {}, +} + +// safeToAnnotateOperatorGroupReasons are the set of reasons that it's safe to attempt to update the operatorgroup +// annotations +var safeToAnnotateOperatorGroupReasons = map[ConditionReason]struct{}{ + CSVReasonOwnerConflict: {}, + CSVReasonInstallSuccessful: {}, + CSVReasonInvalidInstallModes: {}, + CSVReasonNoTargetNamespaces: {}, + CSVReasonUnsupportedOperatorGroup: {}, + CSVReasonNoOperatorGroup: {}, + CSVReasonTooManyOperatorGroups: {}, + CSVReasonInterOperatorGroupOwnerConflict: {}, + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs: {}, +} + +// SetPhaseWithEventIfChanged emits a Kubernetes event with details of a phase change and sets the current phase if phase, reason, or message would changed +func (c *ClusterServiceVersion) SetPhaseWithEventIfChanged(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) { + if c.Status.Phase == phase && c.Status.Reason == reason && c.Status.Message == message { + return + } + + c.SetPhaseWithEvent(phase, reason, message, now, recorder) +} + +// SetPhaseWithEvent generates a Kubernetes event with details about the phase change and sets the current phase +func (c *ClusterServiceVersion) SetPhaseWithEvent(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time, recorder record.EventRecorder) { + var eventtype string + if phase == CSVPhaseFailed { + eventtype = v1.EventTypeWarning + } else { + eventtype = v1.EventTypeNormal + } + go recorder.Event(c, eventtype, string(reason), message) + c.SetPhase(phase, reason, message, now) +} + +// 
SetPhase sets the current phase and adds a condition if necessary +func (c *ClusterServiceVersion) SetPhase(phase ClusterServiceVersionPhase, reason ConditionReason, message string, now *metav1.Time) { + newCondition := func() ClusterServiceVersionCondition { + return ClusterServiceVersionCondition{ + Phase: c.Status.Phase, + LastTransitionTime: c.Status.LastTransitionTime, + LastUpdateTime: c.Status.LastUpdateTime, + Message: message, + Reason: reason, + } + } + + defer c.TrimConditionsIfLimitExceeded() + + c.Status.LastUpdateTime = now + if c.Status.Phase != phase { + c.Status.Phase = phase + c.Status.LastTransitionTime = now + } + c.Status.Message = message + c.Status.Reason = reason + if len(c.Status.Conditions) == 0 { + c.Status.Conditions = append(c.Status.Conditions, newCondition()) + return + } + + previousCondition := c.Status.Conditions[len(c.Status.Conditions)-1] + if previousCondition.Phase != c.Status.Phase || previousCondition.Reason != c.Status.Reason { + c.Status.Conditions = append(c.Status.Conditions, newCondition()) + } +} + +// SetRequirementStatus adds the status of all requirements to the CSV status +func (c *ClusterServiceVersion) SetRequirementStatus(statuses []RequirementStatus) { + c.Status.RequirementStatus = statuses +} + +// IsObsolete returns if this CSV is being replaced or is marked for deletion +func (c *ClusterServiceVersion) IsObsolete() bool { + for _, condition := range c.Status.Conditions { + _, ok := obsoleteReasons[condition.Reason] + if ok { + return true + } + } + return false +} + +// IsCopied returns true if the CSV has been copied and false otherwise. +func (c *ClusterServiceVersion) IsCopied() bool { + return c.Status.Reason == CSVReasonCopied || IsCopied(c) +} + +func IsCopied(o metav1.Object) bool { + annotations := o.GetAnnotations() + if annotations != nil { + operatorNamespace, ok := annotations[OperatorGroupNamespaceAnnotationKey] + if ok && o.GetNamespace() != operatorNamespace { + return true + } + } + + if labels := o.GetLabels(); labels != nil { + if _, ok := labels[CopiedLabelKey]; ok { + return true + } + } + return false +} + +func (c *ClusterServiceVersion) IsUncopiable() bool { + if c.Status.Phase == CSVPhaseNone { + return true + } + _, ok := uncopiableReasons[c.Status.Reason] + return ok +} + +func (c *ClusterServiceVersion) IsSafeToUpdateOperatorGroupAnnotations() bool { + _, ok := safeToAnnotateOperatorGroupReasons[c.Status.Reason] + return ok +} + +// NewInstallModeSet returns an InstallModeSet instantiated from the given list of InstallModes. +// If the given list is not a set, an error is returned. +func NewInstallModeSet(modes []InstallMode) (InstallModeSet, error) { + set := InstallModeSet{} + for _, mode := range modes { + if _, exists := set[mode.Type]; exists { + return nil, fmt.Errorf("InstallMode list contains duplicates, cannot make set: %v", modes) + } + set[mode.Type] = mode.Supported + } + + return set, nil +} + +// Supports returns an error if the InstallModeSet does not support configuration for +// the given operatorNamespace and list of target namespaces. 
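A usage sketch for the install-mode set semantics described above; the InstallModeType constants are defined in clusterserviceversion_types.go, further down in this diff:

```go
// Validating an OperatorGroup's namespace selection against the install
// modes a CSV declares.
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	set, err := v1alpha1.NewInstallModeSet([]v1alpha1.InstallMode{
		{Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true},
		{Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: false},
	})
	if err != nil {
		panic(err) // only returned if the list contains duplicates
	}

	// Watching the operator's own namespace is allowed...
	fmt.Println(set.Supports("operators", []string{"operators"})) // <nil>

	// ...but watching all namespaces ("") is not.
	fmt.Println(set.Supports("operators", []string{""})) // error
}
```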
+func (set InstallModeSet) Supports(operatorNamespace string, namespaces []string) error { + numNamespaces := len(namespaces) + switch { + case numNamespaces == 0: + return fmt.Errorf("operatorgroup has invalid selected namespaces, cannot configure to watch zero namespaces") + case numNamespaces == 1: + switch namespaces[0] { + case operatorNamespace: + if !set[InstallModeTypeOwnNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace) + } + case v1.NamespaceAll: + if !set[InstallModeTypeAllNamespaces] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch all namespaces", InstallModeTypeAllNamespaces) + } + default: + if !set[InstallModeTypeSingleNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch one namespace", InstallModeTypeSingleNamespace) + } + } + case numNamespaces > 1 && !set[InstallModeTypeMultiNamespace]: + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch %d namespaces", InstallModeTypeMultiNamespace, numNamespaces) + case numNamespaces > 1: + for _, namespace := range namespaces { + if namespace == operatorNamespace && !set[InstallModeTypeOwnNamespace] { + return fmt.Errorf("%s InstallModeType not supported, cannot configure to watch own namespace", InstallModeTypeOwnNamespace) + } + if namespace == v1.NamespaceAll { + return fmt.Errorf("operatorgroup has invalid selected namespaces, NamespaceAll found when |selected namespaces| > 1") + } + } + } + + return nil +} + +func (c *ClusterServiceVersion) TrimConditionsIfLimitExceeded() { + if len(c.Status.Conditions) <= ConditionsLengthLimit { + return + } + + firstIndex := len(c.Status.Conditions) - ConditionsLengthLimit + c.Status.Conditions = c.Status.Conditions[firstIndex:len(c.Status.Conditions)] +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go new file mode 100644 index 000000000000..3e6d3248037e --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -0,0 +1,737 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + rbac "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/operator-framework/api/pkg/lib/version" +) + +const ( + ClusterServiceVersionAPIVersion = GroupName + "/" + GroupVersion + ClusterServiceVersionKind = "ClusterServiceVersion" + OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace" + InstallStrategyNameDeployment = "deployment" + SkipRangeAnnotationKey = "olm.skipRange" +) + +// InstallModeType is a supported type of install mode for CSV installation +type InstallModeType string + +const ( + // InstallModeTypeOwnNamespace indicates that the operator can be a member of an `OperatorGroup` that selects its own namespace. + InstallModeTypeOwnNamespace InstallModeType = "OwnNamespace" + // InstallModeTypeSingleNamespace indicates that the operator can be a member of an `OperatorGroup` that selects one namespace. 
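A hedged sketch of how a consumer might drive NewInstallModeSet and the Supports method just shown (namespace names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	set, err := v1alpha1.NewInstallModeSet([]v1alpha1.InstallMode{
		{Type: v1alpha1.InstallModeTypeOwnNamespace, Supported: true},
		{Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: false},
	})
	if err != nil {
		panic(err) // duplicate InstallModeTypes would land here
	}

	// Watching the operator's own namespace is permitted by the set above.
	if err := set.Supports("operators", []string{"operators"}); err != nil {
		panic(err)
	}

	// Watching all namespaces ("" is v1.NamespaceAll) is rejected.
	if err := set.Supports("operators", []string{""}); err != nil {
		fmt.Println(err) // AllNamespaces InstallModeType not supported...
	}
}
```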
+ InstallModeTypeSingleNamespace InstallModeType = "SingleNamespace" + // InstallModeTypeMultiNamespace indicates that the operator can be a member of an `OperatorGroup` that selects more than one namespace. + InstallModeTypeMultiNamespace InstallModeType = "MultiNamespace" + // InstallModeTypeAllNamespaces indicates that the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string ""). + InstallModeTypeAllNamespaces InstallModeType = "AllNamespaces" +) + +// InstallMode associates an InstallModeType with a flag representing if the CSV supports it +// +k8s:openapi-gen=true +type InstallMode struct { + Type InstallModeType `json:"type"` + Supported bool `json:"supported"` +} + +// InstallModeSet is a mapping of unique InstallModeTypes to whether they are supported. +type InstallModeSet map[InstallModeType]bool + +// NamedInstallStrategy represents the block of an ClusterServiceVersion resource +// where the install strategy is specified. +// +k8s:openapi-gen=true +type NamedInstallStrategy struct { + StrategyName string `json:"strategy"` + StrategySpec StrategyDetailsDeployment `json:"spec,omitempty"` +} + +// StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy +// +k8s:openapi-gen=true +type StrategyDeploymentPermissions struct { + ServiceAccountName string `json:"serviceAccountName"` + Rules []rbac.PolicyRule `json:"rules"` +} + +// StrategyDeploymentSpec contains the name, spec and labels for the deployment ALM should create +// +k8s:openapi-gen=true +type StrategyDeploymentSpec struct { + Name string `json:"name"` + Spec appsv1.DeploymentSpec `json:"spec"` + Label labels.Set `json:"label,omitempty"` +} + +// StrategyDetailsDeployment represents the parsed details of a Deployment +// InstallStrategy. 
+// +k8s:openapi-gen=true +type StrategyDetailsDeployment struct { + DeploymentSpecs []StrategyDeploymentSpec `json:"deployments"` + Permissions []StrategyDeploymentPermissions `json:"permissions,omitempty"` + ClusterPermissions []StrategyDeploymentPermissions `json:"clusterPermissions,omitempty"` +} + +func (d *StrategyDetailsDeployment) GetStrategyName() string { + return InstallStrategyNameDeployment +} + +// StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it +// +k8s:openapi-gen=true +type StatusDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it +// +k8s:openapi-gen=true +type SpecDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// ActionDescriptor describes a declarative action that can be performed on a custom resource instance +// +k8s:openapi-gen=true +type ActionDescriptor struct { + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` +} + +// CRDDescription provides details to OLM about the CRDs +// +k8s:openapi-gen=true +type CRDDescription struct { + Name string `json:"name"` + Version string `json:"version"` + Kind string `json:"kind"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + Resources []APIResourceReference `json:"resources,omitempty"` + StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"` + SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"` + ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"` +} + +// APIServiceDescription provides details to OLM about apis provided via aggregation +// +k8s:openapi-gen=true +type APIServiceDescription struct { + Name string `json:"name"` + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + DeploymentName string `json:"deploymentName,omitempty"` + ContainerPort int32 `json:"containerPort,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + Resources []APIResourceReference `json:"resources,omitempty"` + StatusDescriptors []StatusDescriptor `json:"statusDescriptors,omitempty"` + SpecDescriptors []SpecDescriptor `json:"specDescriptors,omitempty"` + ActionDescriptor []ActionDescriptor `json:"actionDescriptors,omitempty"` +} + +// APIResourceReference is a reference to a Kubernetes resource type that the referrer utilizes. +// +k8s:openapi-gen=true +type APIResourceReference struct { + // Plural name of the referenced resource type (CustomResourceDefinition.Spec.Names[].Plural). Empty string if the referenced resource type is not a custom resource. + Name string `json:"name"` + // Kind of the referenced resource type. + Kind string `json:"kind"` + // API Version of the referenced resource type. 
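To make the strategy types above concrete, here is a rough sketch of assembling a minimal deployment install strategy (the deployment spec is left empty for brevity; all names are illustrative):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	rbacv1 "k8s.io/api/rbac/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	strategy := v1alpha1.NamedInstallStrategy{
		StrategyName: v1alpha1.InstallStrategyNameDeployment, // "deployment"
		StrategySpec: v1alpha1.StrategyDetailsDeployment{
			DeploymentSpecs: []v1alpha1.StrategyDeploymentSpec{{
				Name: "example-operator",
				Spec: appsv1.DeploymentSpec{}, // a real CSV carries a full DeploymentSpec here
			}},
			Permissions: []v1alpha1.StrategyDeploymentPermissions{{
				ServiceAccountName: "example-operator-sa",
				Rules: []rbacv1.PolicyRule{{
					APIGroups: []string{""},
					Resources: []string{"configmaps"},
					Verbs:     []string{"get", "list", "watch"},
				}},
			}},
		},
	}
	fmt.Println(strategy.StrategySpec.GetStrategyName()) // deployment
}
```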
+ Version string `json:"version"` +} + +// GetName returns the name of an APIService as derived from its group and version. +func (d APIServiceDescription) GetName() string { + return fmt.Sprintf("%s.%s", d.Version, d.Group) +} + +// WebhookAdmissionType is the type of admission webhooks supported by OLM +type WebhookAdmissionType string + +const ( + // ValidatingAdmissionWebhook is for validating admission webhooks + ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook" + // MutatingAdmissionWebhook is for mutating admission webhooks + MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook" + // ConversionWebhook is for conversion webhooks + ConversionWebhook WebhookAdmissionType = "ConversionWebhook" +) + +// WebhookDescription provides details to OLM about required webhooks +// +k8s:openapi-gen=true +type WebhookDescription struct { + GenerateName string `json:"generateName"` + // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook + Type WebhookAdmissionType `json:"type"` + DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=443 + ContainerPort int32 `json:"containerPort,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` + Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"` + FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` + MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty"` + SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects"` + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` + AdmissionReviewVersions []string `json:"admissionReviewVersions"` + ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + WebhookPath *string `json:"webhookPath,omitempty"` + ConversionCRDs []string `json:"conversionCRDs,omitempty"` +} + +// GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetValidatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.ValidatingWebhook { + return admissionregistrationv1.ValidatingWebhook{ + Name: w.GenerateName, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + Port: &w.ContainerPort, + }, + CABundle: caBundle, + }, + } +} + +// GetMutatingWebhook returns a MutatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetMutatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.MutatingWebhook { + return admissionregistrationv1.MutatingWebhook{ + Name: w.GenerateName, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: 
w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + Port: &w.ContainerPort, + }, + CABundle: caBundle, + }, + ReinvocationPolicy: w.ReinvocationPolicy, + } +} + +// DomainName returns the result of replacing all periods in the webhook's deployment name with hyphens +func (w *WebhookDescription) DomainName() string { + // Replace all '.'s with "-"s to convert to a DNS-1035 label + return strings.Replace(w.DeploymentName, ".", "-", -1) +} + +// CustomResourceDefinitions declares all of the CRDs managed or required by +// an operator being run by a ClusterServiceVersion. +// +// If the CRD is present in the Owned list, it is implicitly required. +// +k8s:openapi-gen=true +type CustomResourceDefinitions struct { + Owned []CRDDescription `json:"owned,omitempty"` + Required []CRDDescription `json:"required,omitempty"` +} + +// APIServiceDefinitions declares all of the extension apis managed or required by +// an operator being run by a ClusterServiceVersion. +// +k8s:openapi-gen=true +type APIServiceDefinitions struct { + Owned []APIServiceDescription `json:"owned,omitempty"` + Required []APIServiceDescription `json:"required,omitempty"` +} + +// ClusterServiceVersionSpec declarations tell OLM how to install an operator +// that can manage apps for a given version. +// +k8s:openapi-gen=true +type ClusterServiceVersionSpec struct { + InstallStrategy NamedInstallStrategy `json:"install"` + Version version.OperatorVersion `json:"version,omitempty"` + Maturity string `json:"maturity,omitempty"` + CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"` + APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"` + WebhookDefinitions []WebhookDescription `json:"webhookdefinitions,omitempty"` + NativeAPIs []metav1.GroupVersionKind `json:"nativeAPIs,omitempty"` + MinKubeVersion string `json:"minKubeVersion,omitempty"` + + // The name of the operator in display format. + DisplayName string `json:"displayName"` + + // Description of the operator. Can include the features, limitations or use-cases of the + // operator. + // +optional + Description string `json:"description,omitempty"` + + // A list of keywords describing the operator. + // +optional + Keywords []string `json:"keywords,omitempty"` + + // A list of organizational entities maintaining the operator. + // +optional + Maintainers []Maintainer `json:"maintainers,omitempty"` + + // The publishing entity behind the operator. + // +optional + Provider AppLink `json:"provider,omitempty"` + + // A list of links related to the operator. + // +optional + Links []AppLink `json:"links,omitempty"` + + // The icon for this operator. + // +optional + Icon []Icon `json:"icon,omitempty"` + + // InstallModes specify supported installation types + // +optional + InstallModes []InstallMode `json:"installModes,omitempty"` + + // The name of a CSV this one replaces. Should match the `metadata.Name` field of the old CSV. + // +optional + Replaces string `json:"replaces,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
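A small usage sketch for the webhook helpers above; note how DomainName feeds the generated service reference (all values are illustrative):

```go
package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	failurePolicy := admissionregistrationv1.Fail
	sideEffects := admissionregistrationv1.SideEffectClassNone

	desc := v1alpha1.WebhookDescription{
		GenerateName:            "validate.example.com",
		Type:                    v1alpha1.ValidatingAdmissionWebhook,
		DeploymentName:          "example-operator.webhook",
		ContainerPort:           443,
		FailurePolicy:           &failurePolicy,
		SideEffects:             &sideEffects,
		AdmissionReviewVersions: []string{"v1"},
	}

	hook := desc.GetValidatingWebhook("operators", nil, []byte("ca-bundle"))
	fmt.Println(desc.DomainName())              // example-operator-webhook
	fmt.Println(hook.ClientConfig.Service.Name) // example-operator-webhook-service
}
```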
+ // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // Label selector for related resources. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Cleanup specifies the cleanup behaviour when the CSV gets deleted + // +optional + Cleanup CleanupSpec `json:"cleanup,omitempty"` + + // The name(s) of one or more CSV(s) that should be skipped in the upgrade graph. + // Should match the `metadata.Name` field of the CSV that should be skipped. + // This field is only used during catalog creation and plays no part in cluster runtime. + // +optional + Skips []string `json:"skips,omitempty"` + + // List any related images, or other container images that your Operator might require to perform their functions. + // This list should also include operand images as well. All image references should be specified by + // digest (SHA) and not by tag. This field is only used during catalog creation and plays no part in cluster runtime. + // +optional + RelatedImages []RelatedImage `json:"relatedImages,omitempty"` +} + +// +k8s:openapi-gen=true +type CleanupSpec struct { + Enabled bool `json:"enabled"` +} + +// +k8s:openapi-gen=true +type Maintainer struct { + Name string `json:"name,omitempty"` + Email string `json:"email,omitempty"` +} + +// +k8s:openapi-gen=true +type AppLink struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// +k8s:openapi-gen=true +type Icon struct { + Data string `json:"base64data"` + MediaType string `json:"mediatype"` +} + +// +k8s:openapi-gen=true +type RelatedImage struct { + Name string `json:"name"` + Image string `json:"image"` +} + +// ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time. +type ClusterServiceVersionPhase string + +// These are the valid phases of ClusterServiceVersion +const ( + CSVPhaseNone = "" + // CSVPhasePending means the csv has been accepted by the system, but the install strategy has not been attempted. + // This is likely because there are unmet requirements. + CSVPhasePending ClusterServiceVersionPhase = "Pending" + // CSVPhaseInstallReady means that the requirements are met but the install strategy has not been run. + CSVPhaseInstallReady ClusterServiceVersionPhase = "InstallReady" + // CSVPhaseInstalling means that the install strategy has been initiated but not completed. + CSVPhaseInstalling ClusterServiceVersionPhase = "Installing" + // CSVPhaseSucceeded means that the resources in the CSV were created successfully. + CSVPhaseSucceeded ClusterServiceVersionPhase = "Succeeded" + // CSVPhaseFailed means that the install strategy could not be successfully completed. + CSVPhaseFailed ClusterServiceVersionPhase = "Failed" + // CSVPhaseUnknown means that for some reason the state of the csv could not be obtained. + CSVPhaseUnknown ClusterServiceVersionPhase = "Unknown" + // CSVPhaseReplacing means that a newer CSV has been created and the csv's resources will be transitioned to a new owner. 
+ CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing" + // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted + CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting" + // CSVPhaseAny matches all other phases in CSV queries + CSVPhaseAny ClusterServiceVersionPhase = "" +) + +// ConditionReason is a camelcased reason for the state transition +type ConditionReason string + +const ( + CSVReasonRequirementsUnknown ConditionReason = "RequirementsUnknown" + CSVReasonRequirementsNotMet ConditionReason = "RequirementsNotMet" + CSVReasonRequirementsMet ConditionReason = "AllRequirementsMet" + CSVReasonOwnerConflict ConditionReason = "OwnerConflict" + CSVReasonComponentFailed ConditionReason = "InstallComponentFailed" + CSVReasonComponentFailedNoRetry ConditionReason = "InstallComponentFailedNoRetry" + CSVReasonInvalidStrategy ConditionReason = "InvalidInstallStrategy" + CSVReasonWaiting ConditionReason = "InstallWaiting" + CSVReasonInstallSuccessful ConditionReason = "InstallSucceeded" + CSVReasonInstallCheckFailed ConditionReason = "InstallCheckFailed" + CSVReasonComponentUnhealthy ConditionReason = "ComponentUnhealthy" + CSVReasonBeingReplaced ConditionReason = "BeingReplaced" + CSVReasonReplaced ConditionReason = "Replaced" + CSVReasonNeedsReinstall ConditionReason = "NeedsReinstall" + CSVReasonNeedsCertRotation ConditionReason = "NeedsCertRotation" + CSVReasonAPIServiceResourceIssue ConditionReason = "APIServiceResourceIssue" + CSVReasonAPIServiceResourcesNeedReinstall ConditionReason = "APIServiceResourcesNeedReinstall" + CSVReasonAPIServiceInstallFailed ConditionReason = "APIServiceInstallFailed" + CSVReasonCopied ConditionReason = "Copied" + CSVReasonInvalidInstallModes ConditionReason = "InvalidInstallModes" + CSVReasonNoTargetNamespaces ConditionReason = "NoTargetNamespaces" + CSVReasonUnsupportedOperatorGroup ConditionReason = "UnsupportedOperatorGroup" + CSVReasonNoOperatorGroup ConditionReason = "NoOperatorGroup" + CSVReasonTooManyOperatorGroups ConditionReason = "TooManyOperatorGroups" + CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict" + CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs" + CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange" + CSVReasonInvalidWebhookDescription ConditionReason = "InvalidWebhookDescription" + CSVReasonOperatorConditionNotUpgradeable ConditionReason = "OperatorConditionNotUpgradeable" + CSVReasonWaitingForCleanupToComplete ConditionReason = "WaitingOnCleanup" +) + +// HasCAResources returns true if the CSV has owned APIServices or Webhooks. +func (c *ClusterServiceVersion) HasCAResources() bool { + // Return early if there are no owned APIServices or webhook definitions + if len(c.Spec.APIServiceDefinitions.Owned)+len(c.Spec.WebhookDefinitions) == 0 { + return false + } + return true +} + +// Conditions appear in the status as a record of state transitions on the ClusterServiceVersion +// +k8s:openapi-gen=true +type ClusterServiceVersionCondition struct { + // Condition of the ClusterServiceVersion + Phase ClusterServiceVersionPhase `json:"phase,omitempty"` + // A human readable message indicating details about why the ClusterServiceVersion is in this condition. + // +optional + Message string `json:"message,omitempty"` + // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. + // e.g. 
'RequirementsNotMet' + // +optional + Reason ConditionReason `json:"reason,omitempty"` + // Last time we updated the status + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the status transitioned from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// OwnsCRD determines whether the current CSV owns a particular CRD. +func (csv ClusterServiceVersion) OwnsCRD(name string) bool { + for _, desc := range csv.Spec.CustomResourceDefinitions.Owned { + if desc.Name == name { + return true + } + } + + return false +} + +// OwnsAPIService determines whether the current CSV owns a particular APIService. +func (csv ClusterServiceVersion) OwnsAPIService(name string) bool { + for _, desc := range csv.Spec.APIServiceDefinitions.Owned { + apiServiceName := fmt.Sprintf("%s.%s", desc.Version, desc.Group) + if apiServiceName == name { + return true + } + } + + return false +} + +// StatusReason is a camelcased reason for the status of a RequirementStatus or DependentStatus +type StatusReason string + +const ( + RequirementStatusReasonPresent StatusReason = "Present" + RequirementStatusReasonNotPresent StatusReason = "NotPresent" + RequirementStatusReasonPresentNotSatisfied StatusReason = "PresentNotSatisfied" + // The CRD is present but the Established condition is False (not available) + RequirementStatusReasonNotAvailable StatusReason = "PresentNotAvailable" + DependentStatusReasonSatisfied StatusReason = "Satisfied" + DependentStatusReasonNotSatisfied StatusReason = "NotSatisfied" +) + +// DependentStatus is the status for a dependent requirement (to prevent infinite nesting) +// +k8s:openapi-gen=true +type DependentStatus struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Status StatusReason `json:"status"` + UUID string `json:"uuid,omitempty"` + Message string `json:"message,omitempty"` +} + +// +k8s:openapi-gen=true +type RequirementStatus struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Name string `json:"name"` + Status StatusReason `json:"status"` + Message string `json:"message"` + UUID string `json:"uuid,omitempty"` + Dependents []DependentStatus `json:"dependents,omitempty"` +} + +// ClusterServiceVersionStatus represents information about the status of a CSV. Status may trail the actual +// state of a system. +// +k8s:openapi-gen=true +type ClusterServiceVersionStatus struct { + // Current condition of the ClusterServiceVersion + Phase ClusterServiceVersionPhase `json:"phase,omitempty"` + // A human readable message indicating details about why the ClusterServiceVersion is in this condition. + // +optional + Message string `json:"message,omitempty"` + // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. + // e.g. 'RequirementsNotMet' + // +optional + Reason ConditionReason `json:"reason,omitempty"` + // Last time we updated the status + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the status transitioned from one status to another. 
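The ownership helpers above key off names; a brief sketch of both lookups (group and kind names are made up):

```go
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	csv := v1alpha1.ClusterServiceVersion{
		Spec: v1alpha1.ClusterServiceVersionSpec{
			CustomResourceDefinitions: v1alpha1.CustomResourceDefinitions{
				Owned: []v1alpha1.CRDDescription{{Name: "widgets.example.com", Version: "v1", Kind: "Widget"}},
			},
			APIServiceDefinitions: v1alpha1.APIServiceDefinitions{
				Owned: []v1alpha1.APIServiceDescription{{Group: "metrics.example.com", Version: "v1beta1", Kind: "PodMetrics"}},
			},
		},
	}

	fmt.Println(csv.OwnsCRD("widgets.example.com")) // true
	// APIService names take the "<version>.<group>" form used by GetName.
	fmt.Println(csv.OwnsAPIService("v1beta1.metrics.example.com")) // true
}
```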
+ // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + // List of conditions, a history of state transitions + Conditions []ClusterServiceVersionCondition `json:"conditions,omitempty"` + // The status of each requirement for this CSV + RequirementStatus []RequirementStatus `json:"requirementStatus,omitempty"` + // Last time the owned APIService certs were updated + // +optional + CertsLastUpdated *metav1.Time `json:"certsLastUpdated,omitempty"` + // Time the owned APIService certs will rotate next + // +optional + CertsRotateAt *metav1.Time `json:"certsRotateAt,omitempty"` + // CleanupStatus represents information about the status of cleanup while a CSV is pending deletion + // +optional + Cleanup CleanupStatus `json:"cleanup,omitempty"` +} + +// CleanupStatus represents information about the status of cleanup while a CSV is pending deletion +// +k8s:openapi-gen=true +type CleanupStatus struct { + // PendingDeletion is the list of custom resource objects that are pending deletion and blocked on finalizers. + // This indicates the progress of cleanup that is blocking CSV deletion or operator uninstall. + // +optional + PendingDeletion []ResourceList `json:"pendingDeletion,omitempty"` +} + +// ResourceList represents a list of resources which are of the same Group/Kind +// +k8s:openapi-gen=true +type ResourceList struct { + Group string `json:"group"` + Kind string `json:"kind"` + Instances []ResourceInstance `json:"instances"` +} + +// +k8s:openapi-gen=true +type ResourceInstance struct { + Name string `json:"name"` + // Namespace can be empty for cluster-scoped resources + Namespace string `json:"namespace,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName={csv, csvs},categories=olm +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV" +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV" +// +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces" +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` + +// ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`. +type ClusterServiceVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ClusterServiceVersionSpec `json:"spec"` + // +optional + Status ClusterServiceVersionStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterServiceVersionList represents a list of ClusterServiceVersions. +type ClusterServiceVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ClusterServiceVersion `json:"items"` +} + +// GetAllCRDDescriptions returns a deduplicated set of CRDDescriptions that is +// the union of the owned and required CRDDescriptions. +// +// Descriptions with the same name prefer the value in Owned. +// Descriptions are returned in alphabetical order. 
+func (csv ClusterServiceVersion) GetAllCRDDescriptions() []CRDDescription { + set := make(map[string]CRDDescription) + for _, required := range csv.Spec.CustomResourceDefinitions.Required { + set[required.Name] = required + } + + for _, owned := range csv.Spec.CustomResourceDefinitions.Owned { + set[owned.Name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]CRDDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetAllAPIServiceDescriptions returns a deduplicated set of APIServiceDescriptions that is +// the union of the owned and required APIServiceDescriptions. +// +// Descriptions with the same name prefer the value in Owned. +// Descriptions are returned in alphabetical order. +func (csv ClusterServiceVersion) GetAllAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, required := range csv.Spec.APIServiceDefinitions.Required { + name := fmt.Sprintf("%s.%s", required.Version, required.Group) + set[name] = required + } + + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) + set[name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetRequiredAPIServiceDescriptions returns a deduplicated set of required APIServiceDescriptions +// with the intersection of required and owned removed +// Equivalent to the set subtraction required - owned +// +// Descriptions are returned in alphabetical order. +func (csv ClusterServiceVersion) GetRequiredAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, required := range csv.Spec.APIServiceDefinitions.Required { + name := fmt.Sprintf("%s.%s", required.Version, required.Group) + set[name] = required + } + + // Remove any shared owned from the set + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) + if _, ok := set[name]; ok { + delete(set, name) + } + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} + +// GetOwnedAPIServiceDescriptions returns a deduplicated set of owned APIServiceDescriptions +// +// Descriptions are returned in alphabetical order. 
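A sketch of the dedup-and-sort behavior documented above: the Owned copy wins on name collisions, and results come back in alphabetical order (names are illustrative).

```go
package main

import (
	"fmt"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	csv := v1alpha1.ClusterServiceVersion{
		Spec: v1alpha1.ClusterServiceVersionSpec{
			CustomResourceDefinitions: v1alpha1.CustomResourceDefinitions{
				Required: []v1alpha1.CRDDescription{{Name: "widgets.example.com", Version: "v1"}},
				Owned: []v1alpha1.CRDDescription{
					{Name: "widgets.example.com", Version: "v1", DisplayName: "Widget"},
					{Name: "gadgets.example.com", Version: "v1"},
				},
			},
		},
	}

	for _, d := range csv.GetAllCRDDescriptions() {
		fmt.Println(d.Name, d.DisplayName)
	}
	// gadgets.example.com
	// widgets.example.com Widget
}
```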
+func (csv ClusterServiceVersion) GetOwnedAPIServiceDescriptions() []APIServiceDescription { + set := make(map[string]APIServiceDescription) + for _, owned := range csv.Spec.APIServiceDefinitions.Owned { + name := owned.GetName() + set[name] = owned + } + + keys := make([]string, 0) + for key := range set { + keys = append(keys, key) + } + sort.StringSlice(keys).Sort() + + descs := make([]APIServiceDescription, 0) + for _, key := range keys { + descs = append(descs, set[key]) + } + + return descs +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go new file mode 100644 index 000000000000..74bc9b819a40 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +groupName=operators.coreos.com +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators + +// Package v1alpha1 contains resources types for version v1alpha1 of the operators.coreos.com API group. +package v1alpha1 diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go new file mode 100644 index 000000000000..3b1b0feedf6e --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go @@ -0,0 +1,391 @@ +package v1alpha1 + +import ( + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + InstallPlanKind = "InstallPlan" + InstallPlanAPIVersion = GroupName + "/" + GroupVersion +) + +// Approval is the user approval policy for an InstallPlan. +// It must be one of "Automatic" or "Manual". +type Approval string + +const ( + ApprovalAutomatic Approval = "Automatic" + ApprovalManual Approval = "Manual" +) + +// InstallPlanSpec defines a set of Application resources to be installed +type InstallPlanSpec struct { + CatalogSource string `json:"source,omitempty"` + CatalogSourceNamespace string `json:"sourceNamespace,omitempty"` + ClusterServiceVersionNames []string `json:"clusterServiceVersionNames"` + Approval Approval `json:"approval"` + Approved bool `json:"approved"` + Generation int `json:"generation,omitempty"` +} + +// InstallPlanPhase is the current status of a InstallPlan as a whole. +type InstallPlanPhase string + +const ( + InstallPlanPhaseNone InstallPlanPhase = "" + InstallPlanPhasePlanning InstallPlanPhase = "Planning" + InstallPlanPhaseRequiresApproval InstallPlanPhase = "RequiresApproval" + InstallPlanPhaseInstalling InstallPlanPhase = "Installing" + InstallPlanPhaseComplete InstallPlanPhase = "Complete" + InstallPlanPhaseFailed InstallPlanPhase = "Failed" +) + +// InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole. +type InstallPlanConditionType string + +const ( + InstallPlanResolved InstallPlanConditionType = "Resolved" + InstallPlanInstalled InstallPlanConditionType = "Installed" +) + +// ConditionReason is a camelcased reason for the state transition. 
+type InstallPlanConditionReason string + +const ( + InstallPlanReasonPlanUnknown InstallPlanConditionReason = "PlanUnknown" + InstallPlanReasonInstallCheckFailed InstallPlanConditionReason = "InstallCheckFailed" + InstallPlanReasonDependencyConflict InstallPlanConditionReason = "DependenciesConflict" + InstallPlanReasonComponentFailed InstallPlanConditionReason = "InstallComponentFailed" +) + +// StepStatus is the current status of a particular resource an in +// InstallPlan +type StepStatus string + +const ( + StepStatusUnknown StepStatus = "Unknown" + StepStatusNotPresent StepStatus = "NotPresent" + StepStatusPresent StepStatus = "Present" + StepStatusCreated StepStatus = "Created" + StepStatusNotCreated StepStatus = "NotCreated" + StepStatusWaitingForAPI StepStatus = "WaitingForApi" + StepStatusUnsupportedResource StepStatus = "UnsupportedResource" +) + +// ErrInvalidInstallPlan is the error returned by functions that operate on +// InstallPlans when the InstallPlan does not contain totally valid data. +var ErrInvalidInstallPlan = errors.New("the InstallPlan contains invalid data") + +// InstallPlanStatus represents the information about the status of +// steps required to complete installation. +// +// Status may trail the actual state of a system. +type InstallPlanStatus struct { + Phase InstallPlanPhase `json:"phase"` + Conditions []InstallPlanCondition `json:"conditions,omitempty"` + CatalogSources []string `json:"catalogSources"` + Plan []*Step `json:"plan,omitempty"` + // BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster. + // +optional + BundleLookups []BundleLookup `json:"bundleLookups,omitempty"` + // AttenuatedServiceAccountRef references the service account that is used + // to do scoped operator install. + AttenuatedServiceAccountRef *corev1.ObjectReference `json:"attenuatedServiceAccountRef,omitempty"` + + // StartTime is the time when the controller began applying + // the resources listed in the plan to the cluster. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // Message is a human-readable message containing detailed + // information that may be important to understanding why the + // plan has its current status. + // +optional + Message string `json:"message,omitempty"` +} + +// InstallPlanCondition represents the overall status of the execution of +// an InstallPlan. +type InstallPlanCondition struct { + Type InstallPlanConditionType `json:"type,omitempty"` + Status corev1.ConditionStatus `json:"status,omitempty"` // True, False, or Unknown + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason InstallPlanConditionReason `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +// allow overwriting `now` function for deterministic tests +var now = metav1.Now + +// GetCondition returns the InstallPlanCondition of the given type if it exists in the InstallPlanStatus' Conditions. +// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. +func (s InstallPlanStatus) GetCondition(conditionType InstallPlanConditionType) InstallPlanCondition { + for _, cond := range s.Conditions { + if cond.Type == conditionType { + return cond + } + } + + return InstallPlanCondition{ + Type: conditionType, + Status: corev1.ConditionUnknown, + } +} + +// SetCondition adds or updates a condition, using `Type` as merge key. 
+func (s *InstallPlanStatus) SetCondition(cond InstallPlanCondition) InstallPlanCondition { + for i, existing := range s.Conditions { + if existing.Type != cond.Type { + continue + } + if existing.Status == cond.Status { + cond.LastTransitionTime = existing.LastTransitionTime + } + s.Conditions[i] = cond + return cond + } + s.Conditions = append(s.Conditions, cond) + return cond +} + +func OrderSteps(steps []*Step) []*Step { + // CSVs must be applied first + csvList := []*Step{} + + // CRDs must be applied second + crdList := []*Step{} + + // Other resources may be applied in any order + remainingResources := []*Step{} + for _, step := range steps { + switch step.Resource.Kind { + case crdKind: + crdList = append(crdList, step) + case ClusterServiceVersionKind: + csvList = append(csvList, step) + default: + remainingResources = append(remainingResources, step) + } + } + + result := make([]*Step, len(steps)) + i := 0 + + for j := range csvList { + result[i] = csvList[j] + i++ + } + + for j := range crdList { + result[i] = crdList[j] + i++ + } + + for j := range remainingResources { + result[i] = remainingResources[j] + i++ + } + + return result +} + +func (s InstallPlanStatus) NeedsRequeue() bool { + for _, step := range s.Plan { + switch step.Status { + case StepStatusWaitingForAPI: + return true + } + } + + return false +} +func ConditionFailed(cond InstallPlanConditionType, reason InstallPlanConditionReason, message string, now *metav1.Time) InstallPlanCondition { + return InstallPlanCondition{ + Type: cond, + Status: corev1.ConditionFalse, + Reason: reason, + Message: message, + LastUpdateTime: now, + LastTransitionTime: now, + } +} + +func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCondition { + return InstallPlanCondition{ + Type: cond, + Status: corev1.ConditionTrue, + LastUpdateTime: now, + LastTransitionTime: now, + } +} + +// Step represents the status of an individual step in an InstallPlan. +type Step struct { + Resolving string `json:"resolving"` + Resource StepResource `json:"resource"` + Optional bool `json:"optional,omitempty"` + Status StepStatus `json:"status"` +} + +// BundleLookupConditionType is a category of the overall state of a BundleLookup. +type BundleLookupConditionType string + +const ( + // BundleLookupPending describes BundleLookups that are not complete. + BundleLookupPending BundleLookupConditionType = "BundleLookupPending" + + // BundleLookupFailed describes conditions types for when BundleLookups fail + BundleLookupFailed BundleLookupConditionType = "BundleLookupFailed" + + crdKind = "CustomResourceDefinition" +) + +type BundleLookupCondition struct { + // Type of condition. + Type BundleLookupConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` + // Last time the condition was probed. + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// BundleLookup is a request to pull and unpackage the content of a bundle to the cluster. +type BundleLookup struct { + // Path refers to the location of a bundle to pull. 
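Tying the InstallPlan condition helpers together, a minimal sketch (the failure message is illustrative):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	status := v1alpha1.InstallPlanStatus{Phase: v1alpha1.InstallPlanPhaseInstalling}

	// Absent conditions read back as Unknown rather than a zero value.
	fmt.Println(status.GetCondition(v1alpha1.InstallPlanResolved).Status) // Unknown

	// SetCondition merges on Type and keeps LastTransitionTime when the
	// status value has not actually changed.
	now := metav1.Now()
	status.SetCondition(v1alpha1.ConditionMet(v1alpha1.InstallPlanResolved, &now))
	status.SetCondition(v1alpha1.ConditionFailed(v1alpha1.InstallPlanInstalled,
		v1alpha1.InstallPlanReasonComponentFailed, "deployment failed", &now))

	fmt.Println(len(status.Conditions)) // 2
}
```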
+ // It's typically an image reference. + Path string `json:"path"` + // Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs) + Identifier string `json:"identifier"` + // Replaces is the name of the bundle to replace with the one found at Path. + Replaces string `json:"replaces"` + // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. + CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"` + // Conditions represents the overall state of a BundleLookup. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // The effective properties of the unpacked bundle. + // +optional + Properties string `json:"properties,omitempty"` +} + +// GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions. +// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. +func (b BundleLookup) GetCondition(conditionType BundleLookupConditionType) BundleLookupCondition { + for _, cond := range b.Conditions { + if cond.Type == conditionType { + return cond + } + } + + return BundleLookupCondition{ + Type: conditionType, + Status: corev1.ConditionUnknown, + } +} + +// RemoveCondition removes the BundleLookupCondition of the given type from the BundleLookup's Conditions if it exists. +func (b *BundleLookup) RemoveCondition(conditionType BundleLookupConditionType) { + for i, cond := range b.Conditions { + if cond.Type == conditionType { + b.Conditions = append(b.Conditions[:i], b.Conditions[i+1:]...) + if len(b.Conditions) == 0 { + b.Conditions = nil + } + return + } + } +} + +// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found. +func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCondition { + for i, existing := range b.Conditions { + if existing.Type != cond.Type { + continue + } + if existing.Status == cond.Status { + cond.LastTransitionTime = existing.LastTransitionTime + } + b.Conditions[i] = cond + return cond + } + b.Conditions = append(b.Conditions, cond) + + return cond +} + +func (s *Step) String() string { + return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status) +} + +// StepResource represents the status of a resource to be tracked by an +// InstallPlan. 
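A sketch of the BundleLookup condition helpers defined above (the bundle path and message strings are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	lookup := v1alpha1.BundleLookup{
		Path:       "quay.io/example/operator-bundle:v0.1.0",
		Identifier: "example-operator.v0.1.0",
	}

	now := metav1.Now()
	lookup.SetCondition(v1alpha1.BundleLookupCondition{
		Type:               v1alpha1.BundleLookupPending,
		Status:             corev1.ConditionTrue,
		Message:            "unpack job still running",
		LastTransitionTime: &now,
	})

	// Once unpacking completes, the pending condition is removed outright.
	lookup.RemoveCondition(v1alpha1.BundleLookupPending)
	fmt.Println(len(lookup.Conditions)) // 0
}
```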
+type StepResource struct { + CatalogSource string `json:"sourceName"` + CatalogSourceNamespace string `json:"sourceNamespace"` + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + Name string `json:"name"` + Manifest string `json:"manifest,omitempty"` +} + +func (r StepResource) String() string { + return fmt.Sprintf("%s[%s/%s/%s (%s/%s)]", r.Name, r.Group, r.Version, r.Kind, r.CatalogSource, r.CatalogSourceNamespace) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +kubebuilder:resource:shortName=ip,categories=olm +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="CSV",type=string,JSONPath=`.spec.clusterServiceVersionNames[0]`,description="The first CSV in the list of clusterServiceVersionNames" +// +kubebuilder:printcolumn:name="Approval",type=string,JSONPath=`.spec.approval`,description="The approval mode" +// +kubebuilder:printcolumn:name="Approved",type=boolean,JSONPath=`.spec.approved` + +// InstallPlan defines the installation of a set of operators. +type InstallPlan struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec InstallPlanSpec `json:"spec"` + // +optional + Status InstallPlanStatus `json:"status"` +} + +// EnsureCatalogSource ensures that a CatalogSource is present in the Status +// block of an InstallPlan. +func (p *InstallPlan) EnsureCatalogSource(sourceName string) { + for _, srcName := range p.Status.CatalogSources { + if srcName == sourceName { + return + } + } + + p.Status.CatalogSources = append(p.Status.CatalogSources, sourceName) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InstallPlanList is a list of InstallPlan resources. +type InstallPlanList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []InstallPlan `json:"items"` +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go new file mode 100644 index 000000000000..f1cd86f1a372 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go @@ -0,0 +1,55 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/operator-framework/api/pkg/operators" +) + +const ( + // GroupName is the group name used in this package. + GroupName = operators.GroupName + // GroupVersion is the group version used in this package. 
+ GroupVersion = "v1alpha1" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder initializes a scheme builder + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme + + // localSchemeBuilder is expected by generated conversion functions + localSchemeBuilder = &SchemeBuilder +) + +// addKnownTypes adds the list of known types to Scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CatalogSource{}, + &CatalogSourceList{}, + &InstallPlan{}, + &InstallPlanList{}, + &Subscription{}, + &SubscriptionList{}, + &ClusterServiceVersion{}, + &ClusterServiceVersionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go new file mode 100644 index 000000000000..7aa854f596e5 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go @@ -0,0 +1,362 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + SubscriptionKind = "Subscription" + SubscriptionCRDAPIVersion = GroupName + "/" + GroupVersion +) + +// SubscriptionState tracks when updates are available, installing, or service is up to date +type SubscriptionState string + +const ( + SubscriptionStateNone = "" + SubscriptionStateFailed = "UpgradeFailed" + SubscriptionStateUpgradeAvailable = "UpgradeAvailable" + SubscriptionStateUpgradePending = "UpgradePending" + SubscriptionStateAtLatest = "AtLatestKnown" +) + +const ( + SubscriptionReasonInvalidCatalog ConditionReason = "InvalidCatalog" + SubscriptionReasonUpgradeSucceeded ConditionReason = "UpgradeSucceeded" +) + +// SubscriptionSpec defines an Application that can be installed +type SubscriptionSpec struct { + CatalogSource string `json:"source"` + CatalogSourceNamespace string `json:"sourceNamespace"` + Package string `json:"name"` + Channel string `json:"channel,omitempty"` + StartingCSV string `json:"startingCSV,omitempty"` + InstallPlanApproval Approval `json:"installPlanApproval,omitempty"` + Config *SubscriptionConfig `json:"config,omitempty"` +} + +// SubscriptionConfig contains configuration specified for a subscription. +type SubscriptionConfig struct { + // Selector is the label selector for pods to be configured. + // Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. 
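register.go wires these types into a runtime.Scheme; a minimal sketch of consuming it:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve OLM kinds for clients and serializers.
	gvks, _, err := scheme.ObjectKinds(&v1alpha1.ClusterServiceVersion{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // operators.coreos.com/v1alpha1, Kind=ClusterServiceVersion
}
```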
+ // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations are the pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // Resources represents compute resources required by this container. + // Immutable. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // EnvFrom is a list of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Immutable. + // +optional + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` + // Env is a list of environment variables to set in the container. + // Cannot be updated. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge"` + + // List of Volumes to set in the podSpec. + // +optional + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // List of VolumeMounts to set in the container. + // +optional + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` + + // If specified, overrides the pod's scheduling constraints. + // nil sub-attributes will *not* override the original values in the pod.spec for those sub-attributes. + // Use empty object ({}) to erase original sub-attribute values. + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + + // Annotations is an unstructured key value map stored with each Deployment, Pod, APIService in the Operator. + // Typically, annotations may be set by external tools to store and retrieve arbitrary metadata. + // Use this field to pre-define annotations that OLM should add to each of the Subscription's + // deployments, pods, and apiservices. + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` +} + +// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true" +// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties). +type SubscriptionConditionType string + +const ( + // SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy. + SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy" + + // SubscriptionInstallPlanMissing indicates that a Subscription's InstallPlan is missing. + SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing" + + // SubscriptionInstallPlanPending indicates that a Subscription's InstallPlan is pending installation. + SubscriptionInstallPlanPending SubscriptionConditionType = "InstallPlanPending" + + // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed. 
+ SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed" + + // SubscriptionResolutionFailed indicates that the dependency resolution in the namespace in which the subscription is created has failed + SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed" + + // SubscriptionBundleUnpacking indicates that the unpack job is currently running + SubscriptionBundleUnpacking SubscriptionConditionType = "BundleUnpacking" + + // SubscriptionBundleUnpackFailed indicates that the unpack job failed + SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed" + + // SubscriptionDeprecated is a roll-up condition which indicates that the Operator currently installed with this Subscription + // has been deprecated. It will be present when any of the three deprecation types (Package, Channel, Bundle) are present. + SubscriptionDeprecated SubscriptionConditionType = "Deprecated" + + // SubscriptionPackageDeprecated indicates that the Package currently installed with this Subscription has been deprecated. + SubscriptionPackageDeprecated SubscriptionConditionType = "PackageDeprecated" + + // SubscriptionChannelDeprecated indicates that the Channel used with this Subscription has been deprecated. + SubscriptionChannelDeprecated SubscriptionConditionType = "ChannelDeprecated" + + // SubscriptionBundleDeprecated indicates that the Bundle currently installed with this Subscription has been deprecated. + SubscriptionBundleDeprecated SubscriptionConditionType = "BundleDeprecated" +) + +const ( + // NoCatalogSourcesFound is a reason string for Subscriptions with unhealthy CatalogSources due to none being available. + NoCatalogSourcesFound = "NoCatalogSourcesFound" + + // AllCatalogSourcesHealthy is a reason string for Subscriptions that transitioned due to all CatalogSources being healthy. + AllCatalogSourcesHealthy = "AllCatalogSourcesHealthy" + + // CatalogSourcesAdded is a reason string for Subscriptions that transitioned due to CatalogSources being added. + CatalogSourcesAdded = "CatalogSourcesAdded" + + // CatalogSourcesUpdated is a reason string for Subscriptions that transitioned due to CatalogSource being updated. + CatalogSourcesUpdated = "CatalogSourcesUpdated" + + // CatalogSourcesDeleted is a reason string for Subscriptions that transitioned due to CatalogSources being removed. + CatalogSourcesDeleted = "CatalogSourcesDeleted" + + // UnhealthyCatalogSourceFound is a reason string for Subscriptions that transitioned because an unhealthy CatalogSource was found. + UnhealthyCatalogSourceFound = "UnhealthyCatalogSourceFound" + + // ReferencedInstallPlanNotFound is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being found. + ReferencedInstallPlanNotFound = "ReferencedInstallPlanNotFound" + + // InstallPlanNotYetReconciled is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being reconciled yet. + InstallPlanNotYetReconciled = "InstallPlanNotYetReconciled" + + // InstallPlanFailed is a reason string for Subscriptions that transitioned due to a referenced InstallPlan failing without setting an explicit failure condition. + InstallPlanFailed = "InstallPlanFailed" +) + +// SubscriptionCondition represents the latest available observations of a Subscription's state. +type SubscriptionCondition struct { + // Type is the type of Subscription condition. 
+	Type SubscriptionConditionType `json:"type" description:"type of Subscription condition"`
+
+	// Status is the status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
+
+	// Reason is a one-word CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
+
+	// Message is a human-readable message indicating details about the last transition.
+	// +optional
+	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
+
+	// LastHeartbeatTime is the last time we got an update on a given condition.
+	// +optional
+	LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"`
+
+	// LastTransitionTime is the last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another" hash:"ignore"`
+}
+
+// Equals returns true if a SubscriptionCondition equals the one given, false otherwise.
+// Equality is determined by the equality of the type, status, reason, and message fields ONLY.
+func (s SubscriptionCondition) Equals(condition SubscriptionCondition) bool {
+	return s.Type == condition.Type && s.Status == condition.Status && s.Reason == condition.Reason && s.Message == condition.Message
+}
+
+// SubscriptionStatus represents the current status of a Subscription.
+type SubscriptionStatus struct {
+	// CurrentCSV is the CSV the Subscription is progressing to.
+	// +optional
+	CurrentCSV string `json:"currentCSV,omitempty"`
+
+	// InstalledCSV is the CSV currently installed by the Subscription.
+	// +optional
+	InstalledCSV string `json:"installedCSV,omitempty"`
+
+	// Install is a reference to the latest InstallPlan generated for the Subscription.
+	// Deprecated: use InstallPlanRef instead.
+	// +optional
+	Install *InstallPlanReference `json:"installplan,omitempty"`
+
+	// State represents the current state of the Subscription.
+	// +optional
+	State SubscriptionState `json:"state,omitempty"`
+
+	// Reason is the reason the Subscription was transitioned to its current state.
+	// +optional
+	Reason ConditionReason `json:"reason,omitempty"`
+
+	// InstallPlanGeneration is the current generation of the InstallPlan.
+	// +optional
+	InstallPlanGeneration int `json:"installPlanGeneration,omitempty"`
+
+	// InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV.
+	// +optional
+	InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"`
+
+	// CatalogHealth contains the Subscription's view of its relevant CatalogSources' status.
+	// It is used to determine SubscriptionStatusConditions related to CatalogSources.
+	// +optional
+	CatalogHealth []SubscriptionCatalogHealth `json:"catalogHealth,omitempty"`
+
+	// Conditions is a list of the latest available observations about a Subscription's current state.
+	// +optional
+	Conditions []SubscriptionCondition `json:"conditions,omitempty" hash:"set"`
+
+	// LastUpdated represents the last time that the Subscription status was updated.
+	LastUpdated metav1.Time `json:"lastUpdated"`
+}
+
+// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions.
+// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found.
+func (s SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition {
+	for _, cond := range s.Conditions {
+		if cond.Type == conditionType {
+			return cond
+		}
+	}
+
+	return SubscriptionCondition{
+		Type:   conditionType,
+		Status: corev1.ConditionUnknown,
+	}
+}
+
+// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) SetCondition(condition SubscriptionCondition) {
+	for i, cond := range s.Conditions {
+		if cond.Type == condition.Type {
+			s.Conditions[i] = condition
+			return
+		}
+	}
+
+	s.Conditions = append(s.Conditions, condition)
+}
+
+// RemoveConditions removes any conditions of the given types from the SubscriptionStatus' Conditions.
+func (s *SubscriptionStatus) RemoveConditions(remove ...SubscriptionConditionType) {
+	exclusions := map[SubscriptionConditionType]struct{}{}
+	for _, r := range remove {
+		exclusions[r] = struct{}{}
+	}
+
+	var filtered []SubscriptionCondition
+	for _, cond := range s.Conditions {
+		if _, ok := exclusions[cond.Type]; ok {
+			// Skip excluded condition types
+			continue
+		}
+		filtered = append(filtered, cond)
+	}
+
+	s.Conditions = filtered
+}
+
+// InstallPlanReference is a reference to an InstallPlan.
+type InstallPlanReference struct {
+	APIVersion string    `json:"apiVersion"`
+	Kind       string    `json:"kind"`
+	Name       string    `json:"name"`
+	UID        types.UID `json:"uuid"`
+}
+
+// SubscriptionCatalogHealth describes the health of a CatalogSource the Subscription knows about.
+type SubscriptionCatalogHealth struct {
+	// CatalogSourceRef is a reference to a CatalogSource.
+	CatalogSourceRef *corev1.ObjectReference `json:"catalogSourceRef"`
+
+	// LastUpdated represents the last time that the CatalogSourceHealth changed.
+	LastUpdated *metav1.Time `json:"lastUpdated"`
+
+	// Healthy is true if the CatalogSource is healthy; false otherwise.
+	Healthy bool `json:"healthy"`
+}
+
+// Equals returns true if a SubscriptionCatalogHealth equals the one given, false otherwise.
+// Equality is based SOLELY on health and UID.
+func (s SubscriptionCatalogHealth) Equals(health SubscriptionCatalogHealth) bool {
+	return s.Healthy == health.Healthy && s.CatalogSourceRef.UID == health.CatalogSourceRef.UID
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +kubebuilder:resource:shortName={sub, subs},categories=olm
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Package",type=string,JSONPath=`.spec.name`,description="The package subscribed to"
+// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.source`,description="The catalog source for the specified package"
+// +kubebuilder:printcolumn:name="Channel",type=string,JSONPath=`.spec.channel`,description="The channel of updates to subscribe to"
+
+// Subscription keeps operators up to date by tracking changes to Catalogs.
+type Subscription struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec *SubscriptionSpec `json:"spec"`
+	// +optional
+	Status SubscriptionStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubscriptionList is a list of Subscription resources.
+type SubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Subscription `json:"items"` +} + +// GetInstallPlanApproval gets the configured install plan approval or the default +func (s *Subscription) GetInstallPlanApproval() Approval { + if s.Spec.InstallPlanApproval == ApprovalManual { + return ApprovalManual + } + return ApprovalAutomatic +} + +// NewInstallPlanReference returns an InstallPlanReference for the given ObjectReference. +func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference { + return &InstallPlanReference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Name: ref.Name, + UID: ref.UID, + } +} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..684a7432a6e5 --- /dev/null +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1632 @@ +//go:build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "encoding/json" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference. +func (in *APIResourceReference) DeepCopy() *APIResourceReference { + if in == nil { + return nil + } + out := new(APIResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) { + *out = *in + if in.Owned != nil { + in, out := &in.Owned, &out.Owned + *out = make([]APIServiceDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]APIServiceDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions. +func (in *APIServiceDefinitions) DeepCopy() *APIServiceDefinitions { + if in == nil { + return nil + } + out := new(APIServiceDefinitions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceReference, len(*in)) + copy(*out, *in) + } + if in.StatusDescriptors != nil { + in, out := &in.StatusDescriptors, &out.StatusDescriptors + *out = make([]StatusDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpecDescriptors != nil { + in, out := &in.SpecDescriptors, &out.SpecDescriptors + *out = make([]SpecDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionDescriptor != nil { + in, out := &in.ActionDescriptor, &out.ActionDescriptor + *out = make([]ActionDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription. +func (in *APIServiceDescription) DeepCopy() *APIServiceDescription { + if in == nil { + return nil + } + out := new(APIServiceDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor. +func (in *ActionDescriptor) DeepCopy() *ActionDescriptor { + if in == nil { + return nil + } + out := new(ActionDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppLink) DeepCopyInto(out *AppLink) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink. +func (in *AppLink) DeepCopy() *AppLink { + if in == nil { + return nil + } + out := new(AppLink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { + *out = *in + if in.CatalogSourceRef != nil { + in, out := &in.CatalogSourceRef, &out.CatalogSourceRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BundleLookupCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup. +func (in *BundleLookup) DeepCopy() *BundleLookup { + if in == nil { + return nil + } + out := new(BundleLookup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition. +func (in *BundleLookupCondition) DeepCopy() *BundleLookupCondition { + if in == nil { + return nil + } + out := new(BundleLookupCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CRDDescription) DeepCopyInto(out *CRDDescription) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceReference, len(*in)) + copy(*out, *in) + } + if in.StatusDescriptors != nil { + in, out := &in.StatusDescriptors, &out.StatusDescriptors + *out = make([]StatusDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpecDescriptors != nil { + in, out := &in.SpecDescriptors, &out.SpecDescriptors + *out = make([]SpecDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ActionDescriptor != nil { + in, out := &in.ActionDescriptor, &out.ActionDescriptor + *out = make([]ActionDescriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription. +func (in *CRDDescription) DeepCopy() *CRDDescription { + if in == nil { + return nil + } + out := new(CRDDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. +func (in *CatalogSource) DeepCopy() *CatalogSource { + if in == nil { + return nil + } + out := new(CatalogSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CatalogSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CatalogSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList. +func (in *CatalogSourceList) DeepCopy() *CatalogSourceList { + if in == nil { + return nil + } + out := new(CatalogSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CatalogSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { + *out = *in + if in.GrpcPodConfig != nil { + in, out := &in.GrpcPodConfig, &out.GrpcPodConfig + *out = new(GrpcPodConfig) + (*in).DeepCopyInto(*out) + } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(UpdateStrategy) + (*in).DeepCopyInto(*out) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Icon = in.Icon +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. +func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec { + if in == nil { + return nil + } + out := new(CatalogSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { + *out = *in + if in.LatestImageRegistryPoll != nil { + in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll + *out = (*in).DeepCopy() + } + if in.ConfigMapResource != nil { + in, out := &in.ConfigMapResource, &out.ConfigMapResource + *out = new(ConfigMapResourceReference) + (*in).DeepCopyInto(*out) + } + if in.RegistryServiceStatus != nil { + in, out := &in.RegistryServiceStatus, &out.RegistryServiceStatus + *out = new(RegistryServiceStatus) + (*in).DeepCopyInto(*out) + } + if in.GRPCConnectionState != nil { + in, out := &in.GRPCConnectionState, &out.GRPCConnectionState + *out = new(GRPCConnectionState) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. +func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus { + if in == nil { + return nil + } + out := new(CatalogSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupSpec) DeepCopyInto(out *CleanupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupSpec. +func (in *CleanupSpec) DeepCopy() *CleanupSpec { + if in == nil { + return nil + } + out := new(CleanupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupStatus) DeepCopyInto(out *CleanupStatus) { + *out = *in + if in.PendingDeletion != nil { + in, out := &in.PendingDeletion, &out.PendingDeletion + *out = make([]ResourceList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupStatus. +func (in *CleanupStatus) DeepCopy() *CleanupStatus { + if in == nil { + return nil + } + out := new(CleanupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion. +func (in *ClusterServiceVersion) DeepCopy() *ClusterServiceVersion { + if in == nil { + return nil + } + out := new(ClusterServiceVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterServiceVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersionCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition. +func (in *ClusterServiceVersionCondition) DeepCopy() *ClusterServiceVersionCondition { + if in == nil { + return nil + } + out := new(ClusterServiceVersionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterServiceVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList. +func (in *ClusterServiceVersionList) DeepCopy() *ClusterServiceVersionList { + if in == nil { + return nil + } + out := new(ClusterServiceVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterServiceVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec) { + *out = *in + in.InstallStrategy.DeepCopyInto(&out.InstallStrategy) + in.Version.DeepCopyInto(&out.Version) + in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions) + in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) + if in.WebhookDefinitions != nil { + in, out := &in.WebhookDefinitions, &out.WebhookDefinitions + *out = make([]WebhookDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NativeAPIs != nil { + in, out := &in.NativeAPIs, &out.NativeAPIs + *out = make([]metav1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Keywords != nil { + in, out := &in.Keywords, &out.Keywords + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Maintainers != nil { + in, out := &in.Maintainers, &out.Maintainers + *out = make([]Maintainer, len(*in)) + copy(*out, *in) + } + out.Provider = in.Provider + if in.Links != nil { + in, out := &in.Links, &out.Links + *out = make([]AppLink, len(*in)) + copy(*out, *in) + } + if in.Icon != nil { + in, out := &in.Icon, &out.Icon + *out = make([]Icon, len(*in)) + copy(*out, *in) + } + if in.InstallModes != nil { + in, out := &in.InstallModes, &out.InstallModes + *out = make([]InstallMode, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.Cleanup = in.Cleanup + if in.Skips != nil { + in, out := &in.Skips, &out.Skips + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RelatedImages != nil { + in, out := &in.RelatedImages, &out.RelatedImages + *out = make([]RelatedImage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec. +func (in *ClusterServiceVersionSpec) DeepCopy() *ClusterServiceVersionSpec { + if in == nil { + return nil + } + out := new(ClusterServiceVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionStatus) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterServiceVersionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RequirementStatus != nil { + in, out := &in.RequirementStatus, &out.RequirementStatus + *out = make([]RequirementStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CertsLastUpdated != nil { + in, out := &in.CertsLastUpdated, &out.CertsLastUpdated + *out = (*in).DeepCopy() + } + if in.CertsRotateAt != nil { + in, out := &in.CertsRotateAt, &out.CertsRotateAt + *out = (*in).DeepCopy() + } + in.Cleanup.DeepCopyInto(&out.Cleanup) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus. +func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus { + if in == nil { + return nil + } + out := new(ClusterServiceVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference. +func (in *ConfigMapResourceReference) DeepCopy() *ConfigMapResourceReference { + if in == nil { + return nil + } + out := new(ConfigMapResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions) { + *out = *in + if in.Owned != nil { + in, out := &in.Owned, &out.Owned + *out = make([]CRDDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = make([]CRDDescription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions. +func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions { + if in == nil { + return nil + } + out := new(CustomResourceDefinitions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DependentStatus) DeepCopyInto(out *DependentStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus. +func (in *DependentStatus) DeepCopy() *DependentStatus { + if in == nil { + return nil + } + out := new(DependentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtractContentConfig) DeepCopyInto(out *ExtractContentConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtractContentConfig. +func (in *ExtractContentConfig) DeepCopy() *ExtractContentConfig { + if in == nil { + return nil + } + out := new(ExtractContentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { + *out = *in + in.LastConnectTime.DeepCopyInto(&out.LastConnectTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState. +func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState { + if in == nil { + return nil + } + out := new(GRPCConnectionState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } + if in.MemoryTarget != nil { + in, out := &in.MemoryTarget, &out.MemoryTarget + x := (*in).DeepCopy() + *out = &x + } + if in.ExtractContent != nil { + in, out := &in.ExtractContent, &out.ExtractContent + *out = new(ExtractContentConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig. +func (in *GrpcPodConfig) DeepCopy() *GrpcPodConfig { + if in == nil { + return nil + } + out := new(GrpcPodConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Icon) DeepCopyInto(out *Icon) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon. +func (in *Icon) DeepCopy() *Icon { + if in == nil { + return nil + } + out := new(Icon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallMode) DeepCopyInto(out *InstallMode) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode. +func (in *InstallMode) DeepCopy() *InstallMode { + if in == nil { + return nil + } + out := new(InstallMode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) { + { + in := &in + *out = make(InstallModeSet, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallModeSet. 
+func (in InstallModeSet) DeepCopy() InstallModeSet { + if in == nil { + return nil + } + out := new(InstallModeSet) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlan) DeepCopyInto(out *InstallPlan) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan. +func (in *InstallPlan) DeepCopy() *InstallPlan { + if in == nil { + return nil + } + out := new(InstallPlan) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstallPlan) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition. +func (in *InstallPlanCondition) DeepCopy() *InstallPlanCondition { + if in == nil { + return nil + } + out := new(InstallPlanCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InstallPlan, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList. +func (in *InstallPlanList) DeepCopy() *InstallPlanList { + if in == nil { + return nil + } + out := new(InstallPlanList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstallPlanList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference. +func (in *InstallPlanReference) DeepCopy() *InstallPlanReference { + if in == nil { + return nil + } + out := new(InstallPlanReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) { + *out = *in + if in.ClusterServiceVersionNames != nil { + in, out := &in.ClusterServiceVersionNames, &out.ClusterServiceVersionNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec. +func (in *InstallPlanSpec) DeepCopy() *InstallPlanSpec { + if in == nil { + return nil + } + out := new(InstallPlanSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]InstallPlanCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CatalogSources != nil { + in, out := &in.CatalogSources, &out.CatalogSources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Plan != nil { + in, out := &in.Plan, &out.Plan + *out = make([]*Step, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Step) + **out = **in + } + } + } + if in.BundleLookups != nil { + in, out := &in.BundleLookups, &out.BundleLookups + *out = make([]BundleLookup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AttenuatedServiceAccountRef != nil { + in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus. +func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus { + if in == nil { + return nil + } + out := new(InstallPlanStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Maintainer) DeepCopyInto(out *Maintainer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer. +func (in *Maintainer) DeepCopy() *Maintainer { + if in == nil { + return nil + } + out := new(Maintainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) { + *out = *in + in.StrategySpec.DeepCopyInto(&out.StrategySpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy. +func (in *NamedInstallStrategy) DeepCopy() *NamedInstallStrategy { + if in == nil { + return nil + } + out := new(NamedInstallStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll. 
+func (in *RegistryPoll) DeepCopy() *RegistryPoll { + if in == nil { + return nil + } + out := new(RegistryPoll) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) { + *out = *in + in.CreatedAt.DeepCopyInto(&out.CreatedAt) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus. +func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus { + if in == nil { + return nil + } + out := new(RegistryServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelatedImage) DeepCopyInto(out *RelatedImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedImage. +func (in *RelatedImage) DeepCopy() *RelatedImage { + if in == nil { + return nil + } + out := new(RelatedImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { + *out = *in + if in.Dependents != nil { + in, out := &in.Dependents, &out.Dependents + *out = make([]DependentStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus. +func (in *RequirementStatus) DeepCopy() *RequirementStatus { + if in == nil { + return nil + } + out := new(RequirementStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceInstance) DeepCopyInto(out *ResourceInstance) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInstance. +func (in *ResourceInstance) DeepCopy() *ResourceInstance { + if in == nil { + return nil + } + out := new(ResourceInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]ResourceInstance, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor. 
+func (in *SpecDescriptor) DeepCopy() *SpecDescriptor { + if in == nil { + return nil + } + out := new(SpecDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) { + *out = *in + if in.XDescriptors != nil { + in, out := &in.XDescriptors, &out.XDescriptors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor. +func (in *StatusDescriptor) DeepCopy() *StatusDescriptor { + if in == nil { + return nil + } + out := new(StatusDescriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Step) DeepCopyInto(out *Step) { + *out = *in + out.Resource = in.Resource +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepResource) DeepCopyInto(out *StepResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource. +func (in *StepResource) DeepCopy() *StepResource { + if in == nil { + return nil + } + out := new(StepResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPermissions) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbacv1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions. +func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissions { + if in == nil { + return nil + } + out := new(StrategyDeploymentPermissions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = make(labels.Set, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. +func (in *StrategyDeploymentSpec) DeepCopy() *StrategyDeploymentSpec { + if in == nil { + return nil + } + out := new(StrategyDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment) { + *out = *in + if in.DeploymentSpecs != nil { + in, out := &in.DeploymentSpecs, &out.DeploymentSpecs + *out = make([]StrategyDeploymentSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]StrategyDeploymentPermissions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterPermissions != nil { + in, out := &in.ClusterPermissions, &out.ClusterPermissions + *out = make([]StrategyDeploymentPermissions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment. +func (in *StrategyDetailsDeployment) DeepCopy() *StrategyDetailsDeployment { + if in == nil { + return nil + } + out := new(StrategyDetailsDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscription) DeepCopyInto(out *Subscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(SubscriptionSpec) + (*in).DeepCopyInto(*out) + } + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. +func (in *Subscription) DeepCopy() *Subscription { + if in == nil { + return nil + } + out := new(Subscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth) { + *out = *in + if in.CatalogSourceRef != nil { + in, out := &in.CatalogSourceRef, &out.CatalogSourceRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth. +func (in *SubscriptionCatalogHealth) DeepCopy() *SubscriptionCatalogHealth { + if in == nil { + return nil + } + out := new(SubscriptionCatalogHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) { + *out = *in + if in.LastHeartbeatTime != nil { + in, out := &in.LastHeartbeatTime, &out.LastHeartbeatTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition. 
+func (in *SubscriptionCondition) DeepCopy() *SubscriptionCondition { + if in == nil { + return nil + } + out := new(SubscriptionCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. +func (in *SubscriptionConfig) DeepCopy() *SubscriptionConfig { + if in == nil { + return nil + } + out := new(SubscriptionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. +func (in *SubscriptionList) DeepCopy() *SubscriptionList { + if in == nil { + return nil + } + out := new(SubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(SubscriptionConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. +func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { + if in == nil { + return nil + } + out := new(SubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { + *out = *in + if in.Install != nil { + in, out := &in.Install, &out.Install + *out = new(InstallPlanReference) + **out = **in + } + if in.InstallPlanRef != nil { + in, out := &in.InstallPlanRef, &out.InstallPlanRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.CatalogHealth != nil { + in, out := &in.CatalogHealth, &out.CatalogHealth + *out = make([]SubscriptionCatalogHealth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SubscriptionCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. +func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { + if in == nil { + return nil + } + out := new(SubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { + *out = *in + if in.RegistryPoll != nil { + in, out := &in.RegistryPoll, &out.RegistryPoll + *out = new(RegistryPoll) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. +func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { + if in == nil { + return nil + } + out := new(UpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { + *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(admissionregistrationv1.FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(admissionregistrationv1.MatchPolicyType) + **out = **in + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(admissionregistrationv1.SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(admissionregistrationv1.ReinvocationPolicyType) + **out = **in + } + if in.WebhookPath != nil { + in, out := &in.WebhookPath, &out.WebhookPath + *out = new(string) + **out = **in + } + if in.ConversionCRDs != nil { + in, out := &in.ConversionCRDs, &out.ConversionCRDs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookDescription. +func (in *WebhookDescription) DeepCopy() *WebhookDescription { + if in == nil { + return nil + } + out := new(WebhookDescription) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c1a3..6acf8ab1ea04 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused'
   #- whitespace
-  fast: false
+  exclusions:
+    presets:
+      - common-false-positives
+      - legacy
+      - std-error-handling
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 71757151c333..8416275f48ee 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -1,8 +1,14 @@
-
-![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e)
+<!-- cobra-logo image (HTML markup lost in extraction) -->
 
 Cobra is a library for creating powerful modern CLI applications.
+Visit Cobra.dev for extensive documentation
 
 Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
 [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
 name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
@@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex
 [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
 [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
 [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
+
+Supported by:
+
+<!-- Warp sponsorship banner (HTML markup lost in extraction) -->
+
+### [Warp, the AI terminal for devs](https://www.warp.dev/cobra)
+[Try Cobra in Warp today](https://www.warp.dev/cobra)
+
# Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 000000000000..54e60c28c14c --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+
+## Security Updates and CVEs
+
+Information about known security vulnerabilities and CVEs affecting `cobra` will
+be published as GitHub Security Advisories at
+https://github.com/spf13/cobra/security/advisories.
+
+All users are encouraged to watch the repository and upgrade promptly when
+security releases are published.
+
+## `cobra` Security Best Practices for Users
+
+When using `cobra` in your CLIs, the `cobra` maintainers recommend the following:
+
+1. Always use the latest version of `cobra`.
+2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management.
+3. Always use the latest possible version of Go.
+
+## Security Best Practices for Contributors
+
+When contributing to `cobra`:
+
+1. Be mindful of security implications when adding new features or modifying existing ones.
+2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI
+   (like Kubernetes, Docker, Prometheus, etc. etc.)
+3. Write tests that explicitly cover edge cases and potential issues.
+4. If you discover a security issue while working on `cobra`, please report it
+   following the process above rather than opening a public pull request or issue that
+   addresses the vulnerability.
+5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa),
+   [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification),
+   etc.
+
+## Acknowledgments
+
+The `cobra` maintainers would like to thank all security researchers and
+community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!!
+
+---
+
+*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.*
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index dbb2c298ba08..78088db69ca2 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -39,7 +39,7 @@ const (
 )
 
 // FParseErrWhitelist configures Flag parse errors to be ignored
-type FParseErrWhitelist flag.ParseErrorsWhitelist
+type FParseErrWhitelist flag.ParseErrorsAllowlist
 
 // Group Structure to manage groups for commands
 type Group struct {
@@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
 		c.Printf("Unknown help topic %#q\n", args)
 		CheckErr(c.Root().Usage())
 	} else {
+		// Flow the context down to be used in help text
+		if cmd.ctx == nil {
+			cmd.ctx = c.ctx
+		}
+
 		cmd.InitDefaultHelpFlag()    // make possible 'help' flag to be shown
 		cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
 		CheckErr(cmd.Help())
@@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error {
 	c.mergePersistentFlags()
 
 	// do it here after merging all flags and just before parse
-	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+	c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist)
 
 	err := c.Flags().Parse(args)
 	// Print warnings if they occurred (e.g. deprecated flag messages).
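// Example (not part of the upstream patch): a minimal sketch of how a
// downstream command opts in to the renamed allowlist API shown in the hunks
// above. Both spellings still compile with these versions because pflag keeps
// ParseErrorsWhitelist as a deprecated alias of ParseErrorsAllowlist; the
// command and flag names below are hypothetical.
//
//	cmd := &cobra.Command{
//		Use: "demo",
//		RunE: func(cmd *cobra.Command, args []string) error {
//			fmt.Println("ran with:", args)
//			return nil
//		},
//		// Ignore flags the command does not define instead of failing.
//		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
//	}
//	cmd.SetArgs([]string{"--no-such-flag", "positional"})
//	_ = cmd.Execute() // parses without an unknown-flag error
//
//	// The same toggle on a bare pflag.FlagSet, via the new field name:
//	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
//	fs.ParseErrorsAllowlist.UnknownFlags = true
//	_ = fs.Parse([]string{"--no-such-flag"}) // returns nil; the flag is skipped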
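// Example (not part of the upstream patch): the completions.go hunk further
// below adds CompletionOptions.DefaultShellCompDirective. A sketch of how a
// root command could change the fallback directive from the usual file
// completion to "no file completion" (command name hypothetical):
//
//	root := &cobra.Command{Use: "app"}
//	// The directive is looked up on the final command and then its parents,
//	// so setting it once on the root also covers subcommands.
//	root.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)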
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f763175..d3607c2d2fef 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index d4dfbc5ea0b3..eeed1e92b0a5 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -137,12 +137,16 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// DEPRECATED: please use ParseErrorsAllowlist instead +// This type will be removed in a future release +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -158,8 +162,12 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // DEPRECATED: please use ParseErrorsAllowlist instead + // This field will be removed in a future release + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -928,7 +936,6 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -986,6 +993,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... // we do not want to lose arg in this case if len(split) >= 2 { @@ -1044,6 +1053,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' 
// we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1158,12 +1169,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true + f.args = make([]string, 0, len(arguments)) + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1174,7 +1185,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if errors.Is(err, ErrHelp) { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1200,6 +1214,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if errors.Is(err, ErrHelp) { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index f563907e28ff..e62eab53810c 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,6 +8,7 @@ import ( goflag "flag" "reflect" "strings" + "time" ) // go test flags prefixes @@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // since by default those are skipped by pflag.Parse(). 
// Typical usage example: `ParseSkippedFlags(os.Args[1:], goflag.CommandLine)`
@@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
 	}
 	return goFlagSet.Parse(skippedFlags)
 }
+
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
index 890a01afc030..1d1e3bf91a35 100644
--- a/vendor/github.com/spf13/pflag/string_to_string.go
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/csv"
 	"fmt"
+	"sort"
 	"strings"
 )
 
@@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string {
 }
 
 func (s *stringToStringValue) String() string {
+	keys := make([]string, 0, len(*s.value))
+	for k := range *s.value {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
 	records := make([]string, 0, len(*s.value)>>1)
-	for k, v := range *s.value {
+	for _, k := range keys {
+		v := (*s.value)[k]
 		records = append(records, k+"="+v)
 	}
 
diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go
index dc024807e0c7..3dee424791a8 100644
--- a/vendor/github.com/spf13/pflag/time.go
+++ b/vendor/github.com/spf13/pflag/time.go
@@ -48,7 +48,13 @@ func (d *timeValue) Type() string {
 	return "time"
 }
 
-func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) }
+func (d *timeValue) String() string {
+	if d.Time.IsZero() {
+		return ""
+	} else {
+		return d.Time.Format(time.RFC3339Nano)
+	}
+}
 
 // GetTime returns the time value of a flag with the given name
 func (f *FlagSet) GetTime(name string) (time.Time, error) {
diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml
index 7f98d55c4249..0e75d86ae032 100644
--- a/vendor/github.com/stoewer/go-strcase/.golangci.yml
+++ b/vendor/github.com/stoewer/go-strcase/.golangci.yml
@@ -1,26 +1,19 @@
-run:
-  deadline: 10m
+version: "2"
 
 linters:
   enable:
-  - dupl
-  - goconst
-  - gocyclo
-  - godox
-  - gosec
-  - interfacer
-  - lll
-  - maligned
-  - misspell
-  - prealloc
-  - stylecheck
-  - unconvert
-  - unparam
-  - errcheck
-  - golint
-  - gofmt
-  disable: []
-  fast: false
+    - dupl
+    - goconst
+    - gocyclo
+    - godox
+    - gosec
+    - lll
+    - misspell
+    - prealloc
+    - staticcheck
+    - unconvert
+    - unparam
 
-issues:
-  exclude-use-default: false
+formatters:
+  enable:
+    - gofmt
diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go
index ff9e66e0ce13..7a9bec7c10e5 100644
--- a/vendor/github.com/stoewer/go-strcase/camel.go
+++ b/vendor/github.com/stoewer/go-strcase/camel.go
@@ -30,6 +30,9 @@ func camelCase(s string, upper bool) string {
 		} else if isUpper(prev) && isUpper(curr) && isLower(next) {
 			// Assume a case like "R" for "XRequestId"
 			buffer = append(buffer, curr)
+		} else if isUpper(curr) && isDigit(prev) {
+			// Preserve uppercase letters after numbers
+			buffer = append(buffer, curr)
 		} else {
 			buffer = append(buffer, toLower(curr))
 		}
diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go
index ecad58914393..96e79d6e134d 100644
--- a/vendor/github.com/stoewer/go-strcase/helper.go
+++ b/vendor/github.com/stoewer/go-strcase/helper.go
@@ -38,6 +38,12 @@ func isSpace(ch rune) bool {
 	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
 }
 
+// isDigit checks if a character is a digit. More precisely it evaluates if it is
+// in the range of ASCII characters '0' to '9'.
+func isDigit(ch rune) bool { + return ch >= '0' && ch <= '9' +} + // isDelimiter checks if a character is some kind of whitespace or '_' or '-'. func isDelimiter(ch rune) bool { return ch == '-' || ch == '_' || isSpace(ch) diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 114fca619b86..efc89deff38a 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -948,8 +948,6 @@ func (args Arguments) Is(objects ...interface{}) bool { return true } -type outputRenderer func() string - // Diff gets a string describing the differences between the arguments // and the specified objects. // @@ -957,7 +955,7 @@ type outputRenderer func() string func (args Arguments) Diff(objects []interface{}) (string, int) { // TODO: could return string as error and nil for No difference - var outputBuilder strings.Builder + output := "\n" var differences int maxArgCount := len(args) @@ -965,35 +963,24 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { maxArgCount = len(objects) } - outputRenderers := []outputRenderer{} - for i := 0; i < maxArgCount; i++ { - i := i var actual, expected interface{} - var actualFmt, expectedFmt func() string + var actualFmt, expectedFmt string if len(objects) <= i { actual = "(Missing)" - actualFmt = func() string { - return "(Missing)" - } + actualFmt = "(Missing)" } else { actual = objects[i] - actualFmt = func() string { - return fmt.Sprintf("(%[1]T=%[1]v)", actual) - } + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) } if len(args) <= i { expected = "(Missing)" - expectedFmt = func() string { - return "(Missing)" - } + expectedFmt = "(Missing)" } else { expected = args[i] - expectedFmt = func() string { - return fmt.Sprintf("(%[1]T=%[1]v)", expected) - } + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) } if matcher, ok := expected.(argumentMatcher); ok { @@ -1001,22 +988,16 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { func() { defer func() { if r := recover(); r != nil { - actualFmt = func() string { - return fmt.Sprintf("panic in argument matcher: %v", r) - } + actualFmt = fmt.Sprintf("panic in argument matcher: %v", r) } }() matches = matcher.Matches(actual) }() if matches { - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: PASS: %s matched by %s\n", i, actualFmt(), matcher) - }) + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) } else { differences++ - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: FAIL: %s not matched by %s\n", i, actualFmt(), matcher) - }) + output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } } else { switch expected := expected.(type) { @@ -1025,17 +1006,13 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { // not match differences++ - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: FAIL: type %s != type %s - %s\n", i, expected, reflect.TypeOf(actual).Name(), actualFmt()) - }) + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) } case *IsTypeArgument: actualT := reflect.TypeOf(actual) if actualT != expected.t { differences++ - outputRenderers = append(outputRenderers, func() string { - 
return fmt.Sprintf("\t%d: FAIL: type %s != type %s - %s\n", i, expected.t.Name(), actualT.Name(), actualFmt()) - }) + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) } case *FunctionalOptionsArgument: var name string @@ -1046,36 +1023,26 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { const tName = "[]interface{}" if name != reflect.TypeOf(actual).String() && len(expected.values) != 0 { differences++ - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: FAIL: type %s != type %s - %s\n", i, tName, reflect.TypeOf(actual).Name(), actualFmt()) - }) + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) } else { if ef, af := assertOpts(expected.values, actual); ef == "" && af == "" { // match - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: PASS: %s == %s\n", i, tName, tName) - }) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) } else { // not match differences++ - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: FAIL: %s != %s\n", i, af, ef) - }) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) } } default: if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { // match - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: PASS: %s == %s\n", i, actualFmt(), expectedFmt()) - }) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - outputRenderers = append(outputRenderers, func() string { - return fmt.Sprintf("\t%d: FAIL: %s != %s\n", i, actualFmt(), expectedFmt()) - }) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } } @@ -1086,12 +1053,7 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { return "No differences.", differences } - outputBuilder.WriteString("\n") - for _, r := range outputRenderers { - outputBuilder.WriteString(r()) - } - - return outputBuilder.String(), differences + return output, differences } // Assert compares the arguments with the specified objects and fails if diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 4571a5ca3978..ca4544f0dae7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package tracetransform provides conversion functionality for the otlptrace +// exporters. 
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 4abf48d1f622..6eacdf311d28 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 97cd6c54f709..b6e6b10fbf15 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracegrpc package. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a94bca..1d840be205b7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 0a317d92637d..506ca00b61f9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package otlpconfig provides configuration for the otlptrace exporters. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( @@ -52,7 +53,9 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials - Proxy HTTPTransportProxyFunc + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go index 3d4f699d4778..91849038722e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go index 38b97a013134..ba6e411835f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index a12ea4c48eb0..1c4659423361 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 1c5450ab62d9..777e68a7bbd6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. 
- var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 00ab1f20c6d4..2da2298701b2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response. That time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index f156ee66720c..5f78bfdfb069 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.34.0" + return "1.36.0" } diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index dd1b73f1e997..892864ea6295 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR // All implementations must embed UnimplementedTraceServiceServer // for forward compatibility type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) mustEmbedUnimplementedTraceServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 852209b097bd..a7c5d19bff38 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -430,6 +430,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { return 0 } +// A reference to an Entity. +// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. +// +// Status: [Development] +type EntityRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_keys and description_keys. + // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. + // These keys MUST exist in the containing {message}.attributes. + IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing {message}.attributes. + DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` +} + +func (x *EntityRef) Reset() { + *x = EntityRef{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityRef) ProtoMessage() {} + +func (x *EntityRef) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead. 
+func (*EntityRef) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *EntityRef) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +func (x *EntityRef) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EntityRef) GetIdKeys() []string { + if x != nil { + return x.IdKeys + } + return nil +} + +func (x *EntityRef) GetDescriptionKeys() []string { + if x != nil { + return x.DescriptionKeys + } + return nil +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -488,15 +583,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -511,13 +614,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope + (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -599,6 +703,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { return nil } } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ (*AnyValue_StringValue)(nil), @@ -615,7 +731,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index b7545b03b9fe..eb7745d66e02 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -48,6 +48,12 @@ type Resource struct { // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + // + // Note: keys in the references MUST exist in attributes of this message. 
+ // + // Status: [Development] + EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` } func (x *Resource) Reset() { @@ -96,6 +102,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (x *Resource) GetEntityRefs() []*v1.EntityRef { + if x != nil { + return x.EntityRefs + } + return nil +} + var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ @@ -106,7 +119,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, @@ -115,16 +128,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0d, 0x52, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f, + 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -141,16 +159,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ - (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource - (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff47e9..000000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. 
-// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go deleted file mode 100644 index fbf1934a0617..000000000000 --- a/vendor/golang.org/x/exp/slices/cmp.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// min is a version of the predeclared function from the Go 1.21 release. -func min[T constraints.Ordered](a, b T) T { - if a < b || isNaN(a) { - return a - } - return b -} - -// max is a version of the predeclared function from the Go 1.21 release. -func max[T constraints.Ordered](a, b T) T { - if a > b || isNaN(a) { - return a - } - return b -} - -// cmpLess is a copy of cmp.Less from the Go 1.21 release. -func cmpLess[T constraints.Ordered](x, y T) bool { - return (isNaN(x) && !isNaN(y)) || x < y -} - -// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. -func cmpCompare[T constraints.Ordered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN && yNaN { - return 0 - } - if xNaN || x < y { - return -1 - } - if yNaN || x > y { - return +1 - } - return 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 46ceac343995..da0df370da15 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -6,9 +6,8 @@ package slices import ( - "unsafe" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) // Equal reports whether two slices are equal: the same length and all @@ -16,16 +15,10 @@ import ( // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. +// +//go:fix inline func Equal[S ~[]E, E comparable](s1, s2 S) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true + return slices.Equal(s1, s2) } // EqualFunc reports whether two slices are equal using an equality @@ -33,17 +26,10 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool { // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. 
+// +//go:fix inline func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true + return slices.EqualFunc(s1, s2, eq) } // Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair @@ -53,20 +39,10 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmpCompare(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 +// +//go:fix inline +func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { + return slices.Compare(s1, s2) } // CompareFunc is like [Compare] but uses a custom comparison function on each @@ -74,53 +50,41 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). +// +//go:fix inline func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 + return slices.CompareFunc(s1, s2, cmp) } // Index returns the index of the first occurrence of v in s, // or -1 if not present. +// +//go:fix inline func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 + return slices.Index(s, v) } // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. +// +//go:fix inline func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 + return slices.IndexFunc(s, f) } // Contains reports whether v is present in s. +// +//go:fix inline func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 + return slices.Contains(s, v) } // ContainsFunc reports whether at least one // element e of s satisfies f(e). +// +//go:fix inline func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 + return slices.ContainsFunc(s, f) } // Insert inserts the values v... into s at index i, @@ -130,93 +94,10 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). +// +//go:fix inline func Insert[S ~[]E, E any](s S, i int, v ...E) S { - m := len(v) - if m == 0 { - return s - } - n := len(s) - if i == n { - return append(s, v...) - } - if n+m > cap(s) { - // Use append rather than make so that we bump the size of - // the slice up to the next storage class. - // This is what Grow does but we don't call Grow because - // that might copy the values twice. - s2 := append(s[:i], make(S, n+m-i)...) 
- copy(s2[i:], v) - copy(s2[i+m:], s[i:]) - return s2 - } - s = s[:n+m] - - // before: - // s: aaaaaaaabbbbccccccccdddd - // ^ ^ ^ ^ - // i i+m n n+m - // after: - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // - // a are the values that don't move in s. - // v are the values copied in from v. - // b and c are the values from s that are shifted up in index. - // d are the values that get overwritten, never to be seen again. - - if !overlaps(v, s[i+m:]) { - // Easy case - v does not overlap either the c or d regions. - // (It might be in some of a or b, or elsewhere entirely.) - // The data we copy up doesn't write to v at all, so just do it. - - copy(s[i+m:], s[i:]) - - // Now we have - // s: aaaaaaaabbbbbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // Note the b values are duplicated. - - copy(s[i:], v) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s - } - - // The hard case - v overlaps c or d. We can't just shift up - // the data because we'd move or clobber the values we're trying - // to insert. - // So instead, write v on top of d, then rotate. - copy(s[n:], v) - - // Now we have - // s: aaaaaaaabbbbccccccccvvvv - // ^ ^ ^ ^ - // i i+m n n+m - - rotateRight(s[i:], m) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s -} - -// clearSlice sets all elements up to the length of s to the zero value of E. -// We may use the builtin clear func instead, and remove clearSlice, when upgrading -// to Go 1.21+. -func clearSlice[S ~[]E, E any](s S) { - var zero E - for i := range s { - s[i] = zero - } + return slices.Insert(s, i, v...) } // Delete removes the elements s[i:j] from s, returning the modified slice. @@ -224,136 +105,36 @@ func clearSlice[S ~[]E, E any](s S) { // Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. +// +//go:fix inline func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j:len(s)] // bounds check - - if i == j { - return s - } - - oldlen := len(s) - s = append(s[:i], s[j:]...) - clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC - return s + return slices.Delete(s, i, j) } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. // DeleteFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.DeleteFunc(s, del) } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +// +//go:fix inline func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - - if i == j { - return Insert(s, i, v...) - } - if j == len(s) { - return append(s[:i], v...) - } - - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot > cap(s) { - // Too big to fit, allocate and copy over. 
- s2 := append(s[:i], make(S, tot-i)...) // See Insert - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 - } - - r := s[:tot] - - if i+len(v) <= j { - // Easy, as v fits in the deleted portion. - copy(r[i:], v) - if i+len(v) != j { - copy(r[i+len(v):], s[j:]) - } - clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC - return r - } - - // We are expanding (v is bigger than j-i). - // The situation is something like this: - // (example has i=4,j=8,len(s)=16,len(v)=6) - // s: aaaaxxxxbbbbbbbbyy - // ^ ^ ^ ^ - // i j len(s) tot - // a: prefix of s - // x: deleted range - // b: more of s - // y: area to expand into - - if !overlaps(r[i+len(v):], v) { - // Easy, as v is not clobbered by the first copy. - copy(r[i+len(v):], s[j:]) - copy(r[i:], v) - return r - } - - // This is a situation where we don't have a single place to which - // we can copy v. Parts of it need to go to two different places. - // We want to copy the prefix of v into y and the suffix into x, then - // rotate |y| spots to the right. - // - // v[2:] v[:2] - // | | - // s: aaaavvvvbbbbbbbbvv - // ^ ^ ^ ^ - // i j len(s) tot - // - // If either of those two destinations don't alias v, then we're good. - y := len(v) - (j - i) // length of y portion - - if !overlaps(r[i:j], v) { - copy(r[i:j], v[y:]) - copy(r[len(s):], v[:y]) - rotateRight(r[i:], y) - return r - } - if !overlaps(r[len(s):], v) { - copy(r[len(s):], v[:y]) - copy(r[i:j], v[y:]) - rotateRight(r[i:], y) - return r - } - - // Now we know that v overlaps both x and y. - // That means that the entirety of b is *inside* v. - // So we don't need to preserve b at all; instead we - // can copy v first, then copy the b part of v out of - // v to the right destination. - k := startIdx(v, s[j:]) - copy(r[i:], v) - copy(r[i+len(v):], r[i+k:]) - return r + return slices.Replace(s, i, j, v...) } // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. +// +//go:fix inline func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) + return slices.Clone(s) } // Compact replaces consecutive runs of equal elements with a single copy. @@ -361,155 +142,41 @@ func Clone[S ~[]E, E any](s S) S { // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. // Compact zeroes the elements between the new length and the original length. +// +//go:fix inline func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.Compact(s) } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. // CompactFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.CompactFunc(s, eq) } // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. 
After Grow(n), at least n elements can be appended // to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. +// +//go:fix inline func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s + return slices.Grow(s, n) } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. -func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} - -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 // -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - -// TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. -// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. -// The follow-cycles algorithm can be 1-write but it is not very cache friendly. - -// rotateLeft rotates b left by n spaces. -// s_final[i] = s_orig[i+r], wrapping around. -func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } -} -func rotateRight[E any](s []E, r int) { - rotateLeft(s, len(s)-r) -} - -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - -// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. -func overlaps[E any](a, b []E) bool { - if len(a) == 0 || len(b) == 0 { - return false - } - elemSize := unsafe.Sizeof(a[0]) - if elemSize == 0 { - return false - } - // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. - // Also see crypto/internal/alias/alias.go:AnyOverlap - return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && - uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) -} - -// startIdx returns the index in haystack where the needle starts. -// prerequisite: the needle must be aliased entirely inside the haystack. -func startIdx[E any](haystack, needle []E) int { - p := &needle[0] - for i := range haystack { - if p == &haystack[i] { - return i - } - } - // TODO: what if the overlap is by a non-integral number of Es? - panic("needle not found") +//go:fix inline +func Clip[S ~[]E, E any](s S) S { + return slices.Clip(s) } // Reverse reverses the elements of the slice in place. 
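The rotation walk-through above describes the two-block-swap scheme the deleted rotateLeft implements; the same comment mentions the simpler, more cache-friendly triple-reverse alternative. A minimal stand-alone sketch of that alternative, assuming Go 1.21+ for slices.Reverse (illustrative only, not the vendored code):

package main

import (
	"fmt"
	"slices"
)

// rotateLeftTripleReverse rotates s left by r positions: reverse the
// first r elements, reverse the rest, then reverse the whole slice.
// Assumes r >= 0; the modulo keeps r within [0, len(s)).
func rotateLeftTripleReverse[E any](s []E, r int) {
	if len(s) == 0 {
		return
	}
	r %= len(s)
	slices.Reverse(s[:r])
	slices.Reverse(s[r:])
	slices.Reverse(s)
}

func main() {
	d := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	rotateLeftTripleReverse(d, 2)
	fmt.Println(d) // [2 3 4 5 6 7 8 9 0 1], matching the worked example above
}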
+// +//go:fix inline func Reverse[S ~[]E, E any](s S) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } + slices.Reverse(s) } diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f58bbc7ba4de..bd91a8d402ff 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,21 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp - package slices import ( - "math/bits" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) // Sort sorts a slice of any ordered type in ascending order. // When sorting floating-point numbers, NaNs are ordered before other values. -func Sort[S ~[]E, E constraints.Ordered](x S) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +// +//go:fix inline +func Sort[S ~[]E, E cmp.Ordered](x S) { + slices.Sort(x) } // SortFunc sorts the slice x in ascending order as determined by the cmp @@ -28,119 +26,79 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. // To indicate 'uncomparable', return 0 from the function. +// +//go:fix inline func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - n := len(x) - pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) + slices.SortFunc(x, cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal // elements, using cmp to compare elements in the same way as [SortFunc]. +// +//go:fix inline func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - stableCmpFunc(x, len(x), cmp) + slices.SortStableFunc(x, cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { - for i := len(x) - 1; i > 0; i-- { - if cmpLess(x[i], x[i-1]) { - return false - } - } - return true +// +//go:fix inline +func IsSorted[S ~[]E, E cmp.Ordered](x S) bool { + return slices.IsSorted(x) } // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the // comparison function as defined by [SortFunc]. +// +//go:fix inline func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { - for i := len(x) - 1; i > 0; i-- { - if cmp(x[i], x[i-1]) < 0 { - return false - } - } - return true + return slices.IsSortedFunc(x, cmp) } // Min returns the minimal value in x. It panics if x is empty. // For floating-point numbers, Min propagates NaNs (any NaN value in x // forces the output to be NaN). -func Min[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Min: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = min(m, x[i]) - } - return m +// +//go:fix inline +func Min[S ~[]E, E cmp.Ordered](x S) E { + return slices.Min(x) } // MinFunc returns the minimal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one minimal element // according to the cmp function, MinFunc returns the first one. +// +//go:fix inline func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MinFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) < 0 { - m = x[i] - } - } - return m + return slices.MinFunc(x, cmp) } // Max returns the maximal value in x. It panics if x is empty. 
// For floating-point E, Max propagates NaNs (any NaN value in x // forces the output to be NaN). -func Max[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Max: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = max(m, x[i]) - } - return m +// +//go:fix inline +func Max[S ~[]E, E cmp.Ordered](x S) E { + return slices.Max(x) } // MaxFunc returns the maximal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one maximal element // according to the cmp function, MaxFunc returns the first one. +// +//go:fix inline func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MaxFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) > 0 { - m = x[i] - } - } - return m + return slices.MaxFunc(x, cmp) } // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmpLess(x[h], target) { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +// +//go:fix inline +func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { + return slices.BinarySearch(x, target) } // BinarySearchFunc works like [BinarySearch], but uses a custom comparison @@ -150,48 +108,8 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // or a positive number if the slice element follows the target. // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +// +//go:fix inline func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. 
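The deleted isNaN helper below leans on the IEEE-754 rule that NaN is the only value not equal to itself, so a generic function can detect NaN with a single comparison, no math import, and no float-only constraint. A small stand-alone illustration using the stdlib cmp.Ordered constraint (Go 1.21+), not the vendored code:

package main

import (
	"cmp"
	"fmt"
	"math"
)

// isNaN reports whether x is a NaN using only !=; for the non-float
// types permitted by cmp.Ordered it is always false.
func isNaN[T cmp.Ordered](x T) bool {
	return x != x
}

func main() {
	fmt.Println(isNaN(math.NaN())) // true
	fmt.Println(isNaN(1.5))        // false
	fmt.Println(isNaN("abc"))      // false: strings are never NaN
}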
-func isNaN[T constraints.Ordered](x T) bool { - return x != x + return slices.BinarySearchFunc(x, target, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go deleted file mode 100644 index 06f2c7a2481b..000000000000 --- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortCmpFunc sorts data[a:b] using insertion sort. -func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownCmpFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { - child++ - } - if !(cmp(data[first+root], data[first+child]) < 0) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownCmpFunc(data, i, hi, first, cmp) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownCmpFunc(data, lo, i, first, cmp) - } -} - -// pdqsortCmpFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortCmpFunc(data, a, b, cmp) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortCmpFunc(data, a, b, cmp) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsCmpFunc(data, a, b, cmp) - limit-- - } - - pivot, hint := choosePivotCmpFunc(data, a, b, cmp) - if hint == decreasingHint { - reverseRangeCmpFunc(data, a, b, cmp) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. 
- if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortCmpFunc(data, a, b, cmp) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { - mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) - a = mid - continue - } - - mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortCmpFunc(data, a, mid, limit, cmp) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortCmpFunc(data, mid+1, b, limit, cmp) - b = mid - } - } -} - -// partitionCmpFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. -// On return, data[newpivot] = p -func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(cmp(data[a], data[i]) < 0) { - i++ - } - for i <= j && (cmp(data[a], data[j]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(cmp(data[i], data[i-1]) < 0) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotCmpFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method.
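For context on the pivot-selection tiers the comment above lists: median-of-three samples three positions and keeps the middle value, and the Tukey ninther takes the median of three such medians. A simplified sketch under the assumption of an ordered element type and a reasonably long slice — illustrative only, not the vendored pdqsort code:

package main

import (
	"cmp"
	"fmt"
)

// median3 returns whichever of a, b, c indexes the median of the three
// sampled values.
func median3[E cmp.Ordered](data []E, a, b, c int) int {
	if data[b] < data[a] {
		a, b = b, a
	}
	if data[c] < data[b] {
		b, c = c, b
	}
	if data[b] < data[a] {
		a, b = b, a
	}
	return b
}

// ninther approximates the median of nine spread-out samples by taking
// the median of three medians-of-three. No bounds checks for brevity,
// so it assumes a slice long enough that all nine indices are valid.
func ninther[E cmp.Ordered](data []E) int {
	n := len(data)
	i, j, k := n/4, n/2, 3*n/4
	i = median3(data, i-1, i, i+1)
	j = median3(data, j-1, j, j+1)
	k = median3(data, k-1, k, k+1)
	return median3(data, i, j, k)
}

func main() {
	d := []int{9, 3, 7, 1, 8, 2, 6, 4, 5, 0, 11, 10}
	fmt.Println(d[ninther(d)]) // prints a value near the true median
}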
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentCmpFunc(data, i, &swaps, cmp) - j = medianAdjacentCmpFunc(data, j, &swaps, cmp) - k = medianAdjacentCmpFunc(data, k, &swaps, cmp) - } - // Find the median among i, j, k and stores it into j. - j = medianCmpFunc(data, i, j, k, &swaps, cmp) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { - if cmp(data[b], data[a]) < 0 { - *swaps++ - return b, a - } - return a, b -} - -// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { - a, b = order2CmpFunc(data, a, b, swaps, cmp) - b, c = order2CmpFunc(data, b, c, swaps, cmp) - a, b = order2CmpFunc(data, a, b, swaps, cmp) - return b -} - -// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { - return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) -} - -func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortCmpFunc(data, a, b, cmp) - a = b - b += blockSize - } - insertionSortCmpFunc(data, a, n, cmp) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeCmpFunc(data, a, a+blockSize, b, cmp) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeCmpFunc(data, a, m, n, cmp) - } - blockSize *= 2 - } -} - -// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
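The symMerge documentation above (and the body deleted just below) repeatedly performs a binary search for the lowest index whose element is >= a probe value. The same search can be phrased with the stdlib; a minimal sketch, not the vendored helper:

package main

import (
	"fmt"
	"sort"
)

// lowestGE mirrors the inner binary searches of the deleted symMerge
// helpers: the smallest index i in [lo, hi) with data[i] >= v, or hi
// if no such index exists.
func lowestGE(data []int, lo, hi, v int) int {
	return lo + sort.Search(hi-lo, func(k int) bool { return data[lo+k] >= v })
}

func main() {
	d := []int{1, 3, 3, 5, 8, 9}
	fmt.Println(lowestGE(d, 0, len(d), 4)) // 3: first element >= 4 is d[3] = 5
}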
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmp(data[h], data[a]) < 0 { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(cmp(data[m], data[h]) < 0) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(cmp(data[p-c], data[c]) < 0) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateCmpFunc(data, start, m, end, cmp) - } - if a < start && start < mid { - symMergeCmpFunc(data, a, start, mid, cmp) - } - if mid < end && end < b { - symMergeCmpFunc(data, mid, end, b, cmp) - } -} - -// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeCmpFunc(data, m-i, m, j, cmp) - i -= j - } else { - swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) - j -= i - } - } - // i == j - swapRangeCmpFunc(data, m-i, m, i, cmp) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index 99b47c3986a4..000000000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { - child++ - } - if !cmpLess(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !cmpLess(data[a-1], data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !cmpLess(data[a], data[i]) { - i++ - } - for i <= j && cmpLess(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !cmpLess(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if cmpLess(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmpLess(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !cmpLess(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !cmpLess(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. 
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 617f2a880d2a..e8161d1cf764 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -358,8 +358,8 @@ github.com/RangelReale/osincli # github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b ## explicit; go 1.15 github.com/ajstarks/svgo -# github.com/antlr4-go/antlr/v4 v4.13.0 -## explicit; go 1.20 +# github.com/antlr4-go/antlr/v4 v4.13.1 +## explicit; go 1.22 github.com/antlr4-go/antlr/v4 # github.com/apparentlymart/go-cidr v1.1.0 ## explicit @@ -448,6 +448,9 @@ github.com/campoy/embedmd/embedmd # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 +# github.com/cenkalti/backoff/v5 v5.0.2 +## explicit; go 1.23 +github.com/cenkalti/backoff/v5 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -875,7 +878,7 @@ github.com/google/cadvisor/utils/sysfs github.com/google/cadvisor/utils/sysinfo github.com/google/cadvisor/version github.com/google/cadvisor/watcher -# github.com/google/cel-go v0.26.0 +# github.com/google/cel-go v0.26.1 ## explicit; go 1.22.0 github.com/google/cel-go/cel github.com/google/cel-go/checker @@ -1007,7 +1010,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options @@ -1762,6 +1765,11 @@ github.com/openshift/library-go/test/library/metrics github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/operator-framework/api v0.36.0 +## explicit; go 1.24.6 +github.com/operator-framework/api/pkg/lib/version +github.com/operator-framework/api/pkg/operators +github.com/operator-framework/api/pkg/operators/v1alpha1 # github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20250118001652-a8b9c3c31417 ## explicit; go 1.22.0 github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/routeadvertisements/v1 @@ -1870,13 +1878,13 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.5.1 ## explicit; go 1.18 github.com/spf13/cast -# github.com/spf13/cobra v1.9.1 +# github.com/spf13/cobra v1.10.1 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/jwalterweatherman v1.1.0 ## explicit github.com/spf13/jwalterweatherman -# github.com/spf13/pflag v1.0.7 +# github.com/spf13/pflag v1.0.9 ## explicit; go 1.12 github.com/spf13/pflag # github.com/spf13/viper v1.8.1 @@ -1901,13 +1909,13 @@ github.com/src-d/gcfg/types # github.com/std-uritemplate/std-uritemplate/go/v2 v2.0.3 ## explicit; go 1.20 github.com/std-uritemplate/std-uritemplate/go/v2 -# github.com/stoewer/go-strcase v1.3.0 +# github.com/stoewer/go-strcase v1.3.1 ## explicit; go 1.11 github.com/stoewer/go-strcase # github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 github.com/stretchr/objx -# github.com/stretchr/testify v1.11.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 github.com/stretchr/testify/assert 
github.com/stretchr/testify/assert/yaml @@ -2143,12 +2151,12 @@ go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.34.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig @@ -2181,8 +2189,8 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.5.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/proto/otlp v1.7.0 +## explicit; go 1.23.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 @@ -2253,9 +2261,8 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c -## explicit; go 1.22.0 -golang.org/x/exp/constraints +# golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b +## explicit; go 1.23.0 golang.org/x/exp/slices # golang.org/x/image v0.11.0 ## explicit; go 1.12 @@ -4621,7 +4628,7 @@ k8s.io/utils/ptr k8s.io/utils/strings k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 ## explicit; go 1.21 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics @@ -4730,7 +4737,7 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient # sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 ## explicit; go 1.22.0 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader -# sigs.k8s.io/controller-runtime v0.22.1 +# sigs.k8s.io/controller-runtime v0.22.3 ## explicit; go 1.24.0 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index 0831f3e632a9..b94c40e439e9 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -102,7 +102,6 @@ type Packet struct { Type PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=PacketType" json:"type,omitempty"` // Types that are assignable to Payload: - // // *Packet_DialRequest // *Packet_DialResponse // *Packet_Data diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 092deb43d484..e9f731453b4f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ 
b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -151,8 +151,7 @@ func newClient(config *rest.Config, options Options) (*client, error) { mapper: options.Mapper, codecs: serializer.NewCodecFactory(options.Scheme), - structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), - unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + resourceByType: make(map[cacheKey]*resourceMeta), } rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index acff7a46a408..d75d685cbbb7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -48,11 +48,15 @@ type clientRestResources struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType stores structured type metadata - structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType stores unstructured type metadata - unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta - mu sync.RWMutex + // resourceByType stores type metadata + resourceByType map[cacheKey]*resourceMeta + + mu sync.RWMutex +} + +type cacheKey struct { + gvk schema.GroupVersionKind + forceDisableProtoBuf bool } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. @@ -117,11 +121,11 @@ func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) { // It's better to do creation work twice than to not let multiple // people make requests at once c.mu.RLock() - resourceByType := c.structuredResourceByType - if isUnstructured { - resourceByType = c.unstructuredResourceByType - } - r, known := resourceByType[gvk] + + cacheKey := cacheKey{gvk: gvk, forceDisableProtoBuf: forceDisableProtoBuf} + + r, known := c.resourceByType[cacheKey] + c.mu.RUnlock() if known { @@ -140,7 +144,7 @@ func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) { if err != nil { return nil, err } - resourceByType[gvk] = r + c.resourceByType[cacheKey] = r return r, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go index 49942186c0f2..98df84c56b08 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go @@ -1,6 +1,7 @@ package priorityqueue import ( + "math" "sync" "sync/atomic" "time" @@ -206,6 +207,7 @@ func (w *priorityqueue[T]) spin() { blockForever := make(chan time.Time) var nextReady <-chan time.Time nextReady = blockForever + var nextItemReadyAt time.Time for { select { @@ -213,10 +215,10 @@ func (w *priorityqueue[T]) spin() { return case <-w.itemOrWaiterAdded: case <-nextReady: + nextReady = blockForever + nextItemReadyAt = time.Time{} } - nextReady = blockForever - func() { w.lock.Lock() defer w.lock.Unlock() @@ -227,39 +229,67 @@ func (w *priorityqueue[T]) spin() { // manipulating the tree from within Ascend might lead to panics, so // track what we want to delete and do it after we are done ascending. 
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
index 49942186c0f2..98df84c56b08 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
@@ -1,6 +1,7 @@
 package priorityqueue

 import (
+	"math"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -206,6 +207,7 @@ func (w *priorityqueue[T]) spin() {
 	blockForever := make(chan time.Time)
 	var nextReady <-chan time.Time
 	nextReady = blockForever
+	var nextItemReadyAt time.Time

 	for {
 		select {
@@ -213,10 +215,10 @@ func (w *priorityqueue[T]) spin() {
 			return
 		case <-w.itemOrWaiterAdded:
 		case <-nextReady:
+			nextReady = blockForever
+			nextItemReadyAt = time.Time{}
 		}

-		nextReady = blockForever
-
 		func() {
 			w.lock.Lock()
 			defer w.lock.Unlock()
@@ -227,39 +229,67 @@ func (w *priorityqueue[T]) spin() {
 			// manipulating the tree from within Ascend might lead to panics, so
 			// track what we want to delete and do it after we are done ascending.
 			var toDelete []*item[T]
-			w.queue.Ascend(func(item *item[T]) bool {
-				if item.ReadyAt != nil {
-					if readyAt := item.ReadyAt.Sub(w.now()); readyAt > 0 {
-						nextReady = w.tick(readyAt)
-						return false
+
+			var key T
+
+			// Items in the queue tree are sorted first by priority and second by readiness, so
+			// items with a lower priority might be ready further down in the queue.
+			// We iterate through the priorities high to low until we find a ready item
+			pivot := item[T]{
+				Key:          key,
+				AddedCounter: 0,
+				Priority:     math.MaxInt,
+				ReadyAt:      nil,
+			}
+
+			for {
+				pivotChange := false
+
+				w.queue.AscendGreaterOrEqual(&pivot, func(item *item[T]) bool {
+					// Item is locked, we can not hand it out
+					if w.locked.Has(item.Key) {
+						return true
 					}
-					if !w.becameReady.Has(item.Key) {
-						w.metrics.add(item.Key, item.Priority)
-						w.becameReady.Insert(item.Key)
+
+					if item.ReadyAt != nil {
+						if readyAt := item.ReadyAt.Sub(w.now()); readyAt > 0 {
+							if nextItemReadyAt.After(*item.ReadyAt) || nextItemReadyAt.IsZero() {
+								nextReady = w.tick(readyAt)
+								nextItemReadyAt = *item.ReadyAt
+							}
+
+							// Adjusting the pivot item moves the ascend to the next lower priority
+							pivot.Priority = item.Priority - 1
+							pivotChange = true
+							return false
+						}
+						if !w.becameReady.Has(item.Key) {
+							w.metrics.add(item.Key, item.Priority)
+							w.becameReady.Insert(item.Key)
+						}
 					}
-				}

-				if w.waiters.Load() == 0 {
-					// Have to keep iterating here to ensure we update metrics
-					// for further items that became ready and set nextReady.
-					return true
-				}
+					if w.waiters.Load() == 0 {
+						// Have to keep iterating here to ensure we update metrics
+						// for further items that became ready and set nextReady.
+						return true
+					}

-				// Item is locked, we can not hand it out
-				if w.locked.Has(item.Key) {
-					return true
-				}
+					w.metrics.get(item.Key, item.Priority)
+					w.locked.Insert(item.Key)
+					w.waiters.Add(-1)
+					delete(w.items, item.Key)
+					toDelete = append(toDelete, item)
+					w.becameReady.Delete(item.Key)
+					w.get <- *item

-				w.metrics.get(item.Key, item.Priority)
-				w.locked.Insert(item.Key)
-				w.waiters.Add(-1)
-				delete(w.items, item.Key)
-				toDelete = append(toDelete, item)
-				w.becameReady.Delete(item.Key)
-				w.get <- *item
+					return true
+				})

-				return true
-			})
+				if !pivotChange {
+					break
+				}
+			}

 			for _, item := range toDelete {
 				w.queue.Delete(item)
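The spin() rework above depends on two pieces working together: less() (further below) now orders items by descending priority first, and the ascend starts from a pivot with Priority set to math.MaxInt, lowering the pivot each time it hits a not-yet-ready item so the next pass resumes one priority band down. A small self-contained illustration of that pivot trick, using github.com/google/btree directly; the entry type and its fields are stand-ins, not the vendored item type:

package main

import (
	"fmt"
	"math"

	"github.com/google/btree"
)

// entry is a stand-in for the queue's item type; within a priority band,
// ready entries sort before waiting ones, mirroring the ordering above.
type entry struct {
	key      string
	priority int
	ready    bool
}

func lessEntry(a, b entry) bool {
	if a.priority != b.priority {
		return a.priority > b.priority // higher priority sorts first
	}
	if a.ready != b.ready {
		return a.ready // ready entries precede waiting ones in a band
	}
	return a.key < b.key
}

func main() {
	q := btree.NewG(32, lessEntry)
	q.ReplaceOrInsert(entry{"a", 10, false}) // high priority, still waiting
	q.ReplaceOrInsert(entry{"b", 5, true})   // lower priority, ready now

	// Start above every real priority; after hitting a waiting entry, lower
	// the pivot so the next ascend resumes at the next priority band down.
	pivot := entry{priority: math.MaxInt, ready: true}
	for {
		pivotChanged := false
		q.AscendGreaterOrEqual(pivot, func(e entry) bool {
			if !e.ready {
				pivot.priority = e.priority - 1
				pivotChanged = true
				return false // stop this ascend, retry one band lower
			}
			fmt.Println("handing out", e.key)
			return true
		})
		if !pivotChanged {
			break
		}
	}
}

Running this prints "handing out b": the ascend skips the waiting priority-10 band and still finds the ready lower-priority item, which is exactly the starvation case the vendored change addresses.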
@@ -290,9 +320,18 @@ func (w *priorityqueue[T]) GetWithPriority() (_ T, priority int, shutdown bool)

 	w.notifyItemOrWaiterAdded()

-	item := <-w.get
-
-	return item.Key, item.Priority, w.shutdown.Load()
+	select {
+	case <-w.done:
+		// Return if the queue was shutdown while we were already waiting for an item here.
+		// For example controller workers are continuously calling GetWithPriority and
+		// GetWithPriority is blocking the workers if there are no items in the queue.
+		// If the controller and accordingly the queue is then shut down, without this code
+		// branch the controller workers remain blocked here and are unable to shut down.
+		var zero T
+		return zero, 0, true
+	case item := <-w.get:
+		return item.Key, item.Priority, w.shutdown.Load()
+	}
 }

 func (w *priorityqueue[T]) Get() (item T, shutdown bool) {
@@ -378,6 +417,9 @@ func (w *priorityqueue[T]) logState() {
 }

 func less[T comparable](a, b *item[T]) bool {
+	if a.Priority != b.Priority {
+		return a.Priority > b.Priority
+	}
 	if a.ReadyAt == nil && b.ReadyAt != nil {
 		return true
 	}
@@ -387,9 +429,6 @@ func less[T comparable](a, b *item[T]) bool {
 	if a.ReadyAt != nil && b.ReadyAt != nil && !a.ReadyAt.Equal(*b.ReadyAt) {
 		return a.ReadyAt.Before(*b.ReadyAt)
 	}
-	if a.Priority != b.Priority {
-		return a.Priority > b.Priority
-	}
 	return a.AddedCounter < b.AddedCounter
 }
@@ -417,4 +456,5 @@ type bTree[T any] interface {
 	ReplaceOrInsert(item T) (_ T, _ bool)
 	Delete(item T) (T, bool)
 	Ascend(iterator btree.ItemIteratorG[T])
+	AscendGreaterOrEqual(pivot T, iterator btree.ItemIteratorG[T])
 }
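The GetWithPriority change above adds a second select arm on the queue's done channel so workers blocked waiting for an item can observe shutdown. A minimal, self-contained sketch of that unblock pattern; the queue type and field names here are illustrative, not the vendored ones:

package main

import (
	"fmt"
	"time"
)

type queue struct {
	get  chan string
	done chan struct{}
}

func (q *queue) Get() (item string, shutdown bool) {
	select {
	case <-q.done:
		// Queue shut down while we were blocked; hand back the zero value
		// so the caller's worker loop can exit.
		return "", true
	case it := <-q.get:
		return it, false
	}
}

func main() {
	q := &queue{get: make(chan string), done: make(chan struct{})}

	go func() {
		// No items ever arrive; shut down after a moment instead.
		time.Sleep(50 * time.Millisecond)
		close(q.done)
	}()

	item, shutdown := q.Get() // blocks until done is closed
	fmt.Printf("item=%q shutdown=%v\n", item, shutdown)
}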