diff --git a/go.mod b/go.mod index af3178535a3d..4664be5519f8 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.3 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20240829123714-e455adb7444a + github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b github.com/grafana/go-gelf/v2 v2.0.1 github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc @@ -102,7 +102,7 @@ require ( golang.org/x/sys v0.24.0 golang.org/x/time v0.6.0 google.golang.org/api v0.193.0 - google.golang.org/grpc v1.66.0 + google.golang.org/grpc v1.65.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -137,7 +137,7 @@ require ( github.com/richardartoul/molecule v1.0.0 github.com/schollz/progressbar/v3 v3.14.6 github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 - github.com/thanos-io/objstore v0.0.0-20240828153123-de861b433240 + github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 github.com/twmb/franz-go v1.17.1 github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/willf/bloom v2.0.3+incompatible diff --git a/go.sum b/go.sum index 799c3d424ccb..53733480e554 100644 --- a/go.sum +++ b/go.sum @@ -1048,8 +1048,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20240829123714-e455adb7444a h1:6ZCntjsewoGYfHK+ThEIVeCduaF1njbIu0nTC/G3K10= -github.com/grafana/dskit v0.0.0-20240829123714-e455adb7444a/go.mod h1:XoznwASUB2n11UlII8bK7zYd3n6EbR7MPWNJTpc6ADs= +github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b h1:x2HCzk29I0o5pRPfqWP/qwhXaPGlcz8pohq5kO1NZoE= +github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= @@ -1805,8 +1805,8 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955u github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= -github.com/thanos-io/objstore v0.0.0-20240828153123-de861b433240 h1:0av9LH8A351YQWlrqb7Kb+hRdfrxqpOjL3rDQirCL5g= -github.com/thanos-io/objstore v0.0.0-20240828153123-de861b433240/go.mod h1:Cba80S8NbVBBdyZKzra7San/jXvpAxArbpFymWzIZhg= +github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 h1:z0v9BB/p7s4J6R//+0a5M3wCld8KzNjrGRLIwXfrAZk= +github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= 
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -2669,8 +2669,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go index f982bd6c68c3..e107cab830fc 100644 --- a/vendor/github.com/grafana/dskit/ring/batch.go +++ b/vendor/github.com/grafana/dskit/ring/batch.go @@ -131,7 +131,7 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r DoBatchRing, keys [ // Get call below takes ~1 microsecond for ~500 instances. // Checking every 10K calls would be every 10ms. if i%10e3 == 0 { - if err := ctx.Err(); err != nil { + if err := context.Cause(ctx); err != nil { o.Cleanup() return err } @@ -161,7 +161,7 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r DoBatchRing, keys [ } // One last check before calling the callbacks: it doesn't make sense if context is canceled. - if err := ctx.Err(); err != nil { + if err := context.Cause(ctx); err != nil { o.Cleanup() return err } @@ -196,7 +196,7 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r DoBatchRing, keys [ case <-tracker.done: return nil case <-ctx.Done(): - return ctx.Err() + return context.Cause(ctx) } } diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index 87e31f66bacd..a697e597f043 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -22,7 +22,9 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" + "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/experimental" "google.golang.org/grpc/status" "gopkg.in/yaml.v2" @@ -153,6 +155,7 @@ func newBucket(ctx context.Context, logger log.Logger, gc Config, opts []option. ) if gc.UseGRPC { opts = append(opts, + option.WithGRPCDialOption(experimental.WithRecvBufferPool(grpc.NewSharedBufferPool())), option.WithGRPCConnectionPool(gc.GRPCConnPoolSize), ) gcsClient, err = storage.NewGRPCClient(ctx, opts...) diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d46a04..6a8a07781ae3 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,28 +9,21 @@ for general contribution guidelines. 
## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC -- [arjan-bal](https://github.com/arjan-bal), Google LLC -- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. +- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC -- [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez) -- [canguler](https://github.com/canguler) -- [cesarghali](https://github.com/cesarghali) -- [iamqizhao](https://github.com/iamqizhao) -- [jeanbza](https://github.com/jeanbza) -- [jtattermusch](https://github.com/jtattermusch) -- [lyuxuan](https://github.com/lyuxuan) -- [makmukhi](https://github.com/makmukhi) -- [matt-kwong](https://github.com/matt-kwong) -- [menghanl](https://github.com/menghanl) -- [nicolasnoble](https://github.com/nicolasnoble) -- [srini100](https://github.com/srini100) -- [yongni](https://github.com/yongni) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index abab279379ba..be6e108705c4 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index d7b40b7cb66f..0787d0b50ce9 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specified +// DefaultConfig is a backoff configuration with the default values specfied // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
// // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a1ba..f391744f7299 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -73,21 +72,8 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting - internal.ConnectedAddress = connectedAddress - internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -257,10 +243,6 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target - // MetricsRecorder is the metrics recorder that balancers can use to record - // metrics. Balancer implementations which do not register metrics on - // metrics registry and record on them can ignore this field. - MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -428,9 +410,6 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index f0b1a274fe91..0adc98866c08 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -20,7 +20,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 84e6a25056b9..57a792a7b488 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.27.1 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) } type loadBalancerClient struct { @@ -57,38 +57,53 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} + x := &loadBalancerBalanceLoadClient{ClientStream: stream} return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility. +// for forward compatibility type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error + BalanceLoad(LoadBalancer_BalanceLoadServer) error } -// UnimplementedLoadBalancerServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedLoadBalancerServer struct{} +// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. 
+type UnimplementedLoadBalancerServer struct { +} -func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } -func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -98,22 +113,34 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { - // If the following call panics, it indicates UnimplementedLoadBalancerServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index c09876274131..47a3e938dcf5 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be generated. + // but with only READY SCs will be gerenated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. 
state connectivity.State diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index ddd9bd269bf4..c248a3a83c32 100644 --- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -112,9 +112,7 @@ type scWithRPCCount struct { } func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { - if logger.V(2) { - logger.Infof("least-request: Build called with info: %v", info) - } + logger.Infof("least-request: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 5b592f48ad9d..07527603f1d4 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split + // target do not forwarrd the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/vendor/google.golang.org/grpc/balancer/rls/balancer.go index 5ae4d2e13167..3ac28271618b 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" @@ -78,42 +77,6 @@ var ( clientConnUpdateHook = func() {} dataCachePurgeHook = func() {} resetBackoffHook = func() {} - - cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ - Name: "grpc.lb.rls.cache_entries", - Description: "EXPERIMENTAL. Number of entries in the RLS cache.", - Unit: "entry", - Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, - Default: false, - }) - cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ - Name: "grpc.lb.rls.cache_size", - Description: "EXPERIMENTAL. The current size of the RLS cache.", - Unit: "By", - Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, - Default: false, - }) - defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.rls.default_target_picks", - Description: "EXPERIMENTAL. Number of LB picks sent to the default target.", - Unit: "pick", - Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, - Default: false, - }) - targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.rls.target_picks", - Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. 
Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", - Unit: "pick", - Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, - Default: false, - }) - failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.rls.failed_picks", - Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", - Unit: "pick", - Labels: []string{"grpc.target", "grpc.lb.rls.server_target"}, - Default: false, - }) ) func init() { @@ -140,7 +103,7 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. updateCh: buffer.NewUnbounded(), } lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) - lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String()) + lb.dataCache = newDataCache(maxCacheSize, lb.logger) lb.bg = balancergroup.New(balancergroup.Options{ CC: cc, BuildOpts: opts, @@ -322,27 +285,27 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error // Update the copy of the config in the LB policy before releasing the lock. b.lbCfg = newCfg - b.stateMu.Unlock() - // We cannot do cache operations above because `cacheMu` needs to be grabbed - // before `stateMu` if we are to hold both locks at the same time. - b.cacheMu.Lock() - b.dataCache.updateRLSServerTarget(newCfg.lookupService) - if resizeCache { - // If the new config changes reduces the size of the data cache, we - // might have to evict entries to get the cache size down to the newly - // specified size. If we do evict an entry with valid backoff timer, - // the new picker needs to be sent to the channel to re-process any - // RPCs queued as a result of this backoff timer. - b.dataCache.resize(newCfg.cacheSizeBytes) - } - b.cacheMu.Unlock() // Enqueue an event which will notify us when the above update has been // propagated to all child policies, and the child policies have all // processed their updates, and we have sent a picker update. done := make(chan struct{}) b.updateCh.Put(resumePickerUpdates{done: done}) + b.stateMu.Unlock() <-done + + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. + // + // And we cannot do this operation above (where we compute the + // `resizeCache` boolean) because `cacheMu` needs to be grabbed before + // `stateMu` if we are to hold both locks at the same time. 
+ b.cacheMu.Lock() + b.dataCache.resize(newCfg.cacheSizeBytes) + b.cacheMu.Unlock() + } return nil } @@ -527,19 +490,15 @@ func (b *rlsBalancer) sendNewPickerLocked() { if b.defaultPolicy != nil { b.defaultPolicy.acquireRef() } - picker := &rlsPicker{ - kbm: b.lbCfg.kbMap, - origEndpoint: b.bopts.Target.Endpoint(), - lb: b, - defaultPolicy: b.defaultPolicy, - ctrlCh: b.ctrlCh, - maxAge: b.lbCfg.maxAge, - staleAge: b.lbCfg.staleAge, - bg: b.bg, - rlsServerTarget: b.lbCfg.lookupService, - grpcTarget: b.bopts.Target.String(), - metricsRecorder: b.bopts.MetricsRecorder, + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, } picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) state := balancer.State{ diff --git a/vendor/google.golang.org/grpc/balancer/rls/cache.go b/vendor/google.golang.org/grpc/balancer/rls/cache.go index 7fe796c9587a..d7a6a1a436c6 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/cache.go +++ b/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -22,8 +22,6 @@ import ( "container/list" "time" - "github.com/google/uuid" - estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -49,7 +47,7 @@ type cacheEntry struct { // headerData is received in the RLS response and is to be sent in the // X-Google-RLS-Data header for matching RPCs. headerData string - // expiryTime is the absolute time at which this cache entry stops + // expiryTime is the absolute time at which this cache entry entry stops // being valid. When an RLS request succeeds, this is set to the current // time plus the max_age field from the LB policy config. expiryTime time.Time @@ -165,39 +163,24 @@ func (l *lru) getLeastRecentlyUsed() cacheKey { // // It is not safe for concurrent access. type dataCache struct { - maxSize int64 // Maximum allowed size. - currentSize int64 // Current size. - keys *lru // Cache keys maintained in lru order. - entries map[cacheKey]*cacheEntry - logger *internalgrpclog.PrefixLogger - shutdown *grpcsync.Event - rlsServerTarget string - - // Read only after initialization. - grpcTarget string - uuid string - metricsRecorder estats.MetricsRecorder + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event } -func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache { +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { return &dataCache{ - maxSize: size, - keys: newLRU(), - entries: make(map[cacheKey]*cacheEntry), - logger: logger, - shutdown: grpcsync.NewEvent(), - grpcTarget: grpcTarget, - uuid: uuid.New().String(), - metricsRecorder: metricsRecorder, + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), } } -// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is -// configured with. -func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) { - dc.rlsServerTarget = rlsServerTarget -} - // resize changes the maximum allowed size of the data cache. 
// // The return value indicates if an entry with a valid backoff timer was @@ -240,7 +223,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) { backoffCancelled = true } } - dc.deleteAndCleanup(key, entry) + dc.deleteAndcleanup(key, entry) } dc.maxSize = size return backoffCancelled @@ -266,7 +249,7 @@ func (dc *dataCache) evictExpiredEntries() bool { if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { continue } - dc.deleteAndCleanup(key, entry) + dc.deleteAndcleanup(key, entry) evicted = true } return evicted @@ -327,8 +310,6 @@ func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled if dc.currentSize > dc.maxSize { backoffCancelled = dc.resize(dc.maxSize) } - cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) - cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) return backoffCancelled, true } @@ -338,7 +319,6 @@ func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { dc.currentSize -= entry.size entry.size = newSize dc.currentSize += entry.size - cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { @@ -359,7 +339,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { if !ok { return } - dc.deleteAndCleanup(key, entry) + dc.deleteAndcleanup(key, entry) } // deleteAndCleanup performs actions required at the time of deleting an entry @@ -367,17 +347,15 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { // - the entry is removed from the map of entries // - current size of the data cache is update // - the key is removed from the LRU -func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { +func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { delete(dc.entries, key) dc.currentSize -= entry.size dc.keys.removeEntry(key) - cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) - cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) stop() { for key, entry := range dc.entries { - dc.deleteAndCleanup(key, entry) + dc.deleteAndcleanup(key, entry) } dc.shutdown.Fire() } diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go index cc5ce510ad90..d010f74456fe 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -218,7 +218,7 @@ type matcher struct { names []string } -// Equal reports if m and a are equivalent headerKeys. +// Equal reports if m and are are equivalent headerKeys. 
func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false diff --git a/vendor/google.golang.org/grpc/balancer/rls/picker.go b/vendor/google.golang.org/grpc/balancer/rls/picker.go index e5c86f290687..8f617a4e42e0 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/picker.go +++ b/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/balancer/rls/internal/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - estats "google.golang.org/grpc/experimental/stats" internalgrpclog "google.golang.org/grpc/internal/grpclog" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" @@ -62,15 +61,12 @@ type rlsPicker struct { // The picker is given its own copy of the below fields from the RLS LB policy // to avoid having to grab the mutex on the latter. - rlsServerTarget string - grpcTarget string - metricsRecorder estats.MetricsRecorder - defaultPolicy *childPolicyWrapper // Child policy for the default target. - ctrlCh *controlChannel // Control channel to the RLS server. - maxAge time.Duration // Cache max age from LB config. - staleAge time.Duration // Cache stale age from LB config. - bg exitIdler - logger *internalgrpclog.PrefixLogger + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger } // isFullMethodNameValid return true if name is of the form `/service/method`. @@ -89,17 +85,7 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) p.lb.cacheMu.Lock() - var pr balancer.PickResult - var err error - - // Record metrics without the cache mutex held, to prevent lock contention - // between concurrent RPC's and their Pick calls. Metrics Recording can - // potentially be expensive. - metricsCallback := func() {} - defer func() { - p.lb.cacheMu.Unlock() - metricsCallback() - }() + defer p.lb.cacheMu.Unlock() // Lookup data cache and pending request map using request path and keys. cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} @@ -112,8 +98,7 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { case dcEntry == nil && pendingEntry == nil: throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) - return pr, err + return p.useDefaultPickIfPossible(info, errRLSThrottled) } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable @@ -128,8 +113,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) } // Delegate to child policies. - pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) - return pr, err + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) + return res, err } // We get here only if the data cache entry has expired. 
If entry is in @@ -141,108 +126,67 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // message received from the control plane is still fine, as it could be // useful for debugging purposes. st := dcEntry.status - pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) - return pr, err + return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) } // We get here only if the entry has expired and is not in backoff. throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) - return pr, err + return p.useDefaultPickIfPossible(info, errRLSThrottled) } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // Data cache hit. Pending request exists. default: if dcEntry.expiryTime.After(now) { - pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) - return pr, err + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) + return res, err } // Data cache entry has expired and pending request exists. Queue pick. return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } -// errToPickResult is a helper function which converts the error value returned -// by Pick() to a string that represents the pick result. -func errToPickResult(err error) string { - if err == nil { - return "complete" - } - if errors.Is(err, balancer.ErrNoSubConnAvailable) { - return "queue" - } - if _, ok := status.FromError(err); ok { - return "drop" - } - return "fail" -} - // delegateToChildPoliciesLocked is a helper function which iterates through the // list of child policy wrappers in a cache entry and attempts to find a child // policy to which this RPC can be routed to. If all child policies are in -// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns -// a function to be invoked to record metrics. -func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) { +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { const rlsDataHeaderName = "x-google-rls-data" for i, cpw := range dcEntry.childPolicyWrappers { state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if // it is the last one (which handles the case of delegating to the last - // child picker if all child policies are in TRANSIENT_FAILURE). + // child picker if all child polcies are in TRANSIENT_FAILURE). if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { // Any header data received from the RLS server is stored in the // cache entry and needs to be sent to the actual backend in the // X-Google-RLS-Data header. res, err := state.Picker.Pick(info) if err != nil { - pr := errToPickResult(err) - return res, func() { - if pr == "queue" { - // Don't record metrics for queued Picks. 
- return - } - targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr) - }, err + return res, err } - if res.Metadata == nil { res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) } else { res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) } - return res, func() { - targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete") - }, nil + return res, nil } } - // In the unlikely event that we have a cache entry with no targets, we end up // queueing the RPC. - return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } // useDefaultPickIfPossible is a helper method which delegates to the default -// target if one is configured, or fails the pick with the given error. Returns -// a function to be invoked to record metrics. -func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) { +// target if one is configured, or fails the pick with the given error. +func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { if p.defaultPolicy != nil { state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) - res, err := state.Picker.Pick(info) - pr := errToPickResult(err) - return res, func() { - if pr == "queue" { - // Don't record metrics for queued Picks. - return - } - defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr) - }, err + return state.Picker.Pick(info) } - - return balancer.PickResult{}, func() { - failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget) - }, errOnNoDefault + return balancer.PickResult{}, errOnNoDefault } // sendRouteLookupRequestLocked adds an entry to the pending request map and diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index ed241124219e..36606e79e444 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -32,9 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/weightedroundrobin/internal" - "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" - estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/grpclog" iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" @@ -47,43 +45,6 @@ import ( // Name is the name of the weighted round robin balancer. const Name = "weighted_round_robin" -var ( - rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.wrr.rr_fallback", - Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.", - Unit: "update", - Labels: []string{"grpc.target"}, - OptionalLabels: []string{"grpc.lb.locality"}, - Default: false, - }) - - endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable", - Description: "EXPERIMENTAL. 
Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).", - Unit: "endpoint", - Labels: []string{"grpc.target"}, - OptionalLabels: []string{"grpc.lb.locality"}, - Default: false, - }) - - endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ - Name: "grpc.lb.wrr.endpoint_weight_stale", - Description: "EXPERIMENTAL. Number of endpoints from each scheduler update whose latest weight is older than the expiration period.", - Unit: "endpoint", - Labels: []string{"grpc.target"}, - OptionalLabels: []string{"grpc.lb.locality"}, - Default: false, - }) - endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{ - Name: "grpc.lb.wrr.endpoint_weights", - Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.", - Unit: "endpoint", - Labels: []string{"grpc.target"}, - OptionalLabels: []string{"grpc.lb.locality"}, - Default: false, - }) -) - func init() { balancer.Register(bb{}) } @@ -97,10 +58,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba csEvltr: &balancer.ConnectivityStateEvaluator{}, scMap: make(map[balancer.SubConn]*weightedSubConn), connectivityState: connectivity.Connecting, - target: bOpts.Target.String(), - metricsRecorder: bOpts.MetricsRecorder, } - b.logger = prefixLogger(b) b.logger.Infof("Created") return b @@ -143,11 +101,8 @@ func (bb) Name() string { // wrrBalancer implements the weighted round robin LB policy. type wrrBalancer struct { - // The following fields are immutable. - cc balancer.ClientConn - logger *grpclog.PrefixLogger - target string - metricsRecorder estats.MetricsRecorder + cc balancer.ClientConn + logger *grpclog.PrefixLogger // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -159,7 +114,6 @@ type wrrBalancer struct { resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure stopPicker func() - locality string } func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { @@ -171,7 +125,6 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } b.cfg = cfg - b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) b.updateAddresses(ccs.ResolverState.Addresses) if len(ccs.ResolverState.Addresses) == 0 { @@ -218,10 +171,6 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { // Initially, we set load reports to off, because they are not // running upon initial weightedSubConn creation. 
cfg: &lbConfig{EnableOOBLoadReport: false}, - - metricsRecorder: b.metricsRecorder, - target: b.target, - locality: b.locality, } b.subConns.Set(addr, wsc) b.scMap[sc] = wsc @@ -369,12 +318,9 @@ func (b *wrrBalancer) regeneratePicker() { } p := &picker{ - v: rand.Uint32(), // start the scheduler at a random point - cfg: b.cfg, - subConns: b.readySubConns(), - metricsRecorder: b.metricsRecorder, - locality: b.locality, - target: b.target, + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), } var ctx context.Context ctx, b.stopPicker = context.WithCancel(context.Background()) @@ -393,20 +339,16 @@ type picker struct { v uint32 // incrementing value used by the scheduler; accessed atomically cfg *lbConfig // active config when picker created subConns []*weightedSubConn // all READY subconns - - // The following fields are immutable. - target string - locality string - metricsRecorder estats.MetricsRecorder } -func (p *picker) scWeights(recordMetrics bool) []float64 { +// scWeights returns a slice containing the weights from p.subConns in the same +// order as p.subConns. +func (p *picker) scWeights() []float64 { ws := make([]float64, len(p.subConns)) now := internal.TimeNow() for i, wsc := range p.subConns { - ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) } - return ws } @@ -415,7 +357,7 @@ func (p *picker) inc() uint32 { } func (p *picker) regenerateScheduler() { - s := p.newScheduler(true) + s := newScheduler(p.scWeights(), p.inc) atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) } @@ -425,7 +367,6 @@ func (p *picker) start(ctx context.Context) { // No need to regenerate weights with only one backend. return } - go func() { ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) defer ticker.Stop() @@ -463,12 +404,8 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // When needed, it also tracks connectivity state, listens for metrics updates // by implementing the orca.OOBListener interface and manages that listener. type weightedSubConn struct { - // The following fields are immutable. balancer.SubConn - logger *grpclog.PrefixLogger - target string - metricsRecorder estats.MetricsRecorder - locality string + logger *grpclog.PrefixLogger // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -558,17 +495,14 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect w.SubConn.Connect() case connectivity.Ready: // If we transition back to READY state, reset nonEmptySince so that we - // apply the blackout period after we start receiving load data. Also - // reset lastUpdated to trigger endpoint weight not yet usable in the - // case endpoint gets asked what weight it is before receiving a new - // load report. Note that we cannot guarantee that we will never receive - // lingering callbacks for backend metric reports from the previous - // connection after the new connection has been established, but they - // should be masked by new backend metric reports from the new - // connection by the time the blackout period ends. + // apply the blackout period after we start receiving load data. 
Note + // that we cannot guarantee that we will never receive lingering + // callbacks for backend metric reports from the previous connection + // after the new connection has been established, but they should be + // masked by new backend metric reports from the new connection by the + // time the blackout period ends. w.mu.Lock() w.nonEmptySince = time.Time{} - w.lastUpdated = time.Time{} w.mu.Unlock() case connectivity.Shutdown: if w.stopORCAListener != nil { @@ -593,44 +527,21 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect // weight returns the current effective weight of the subconn, taking into // account the parameters. Returns 0 for blacked out or expired data, which -// will cause the backend weight to be treated as the mean of the weights of the -// other backends. If forScheduler is set to true, this function will emit -// metrics through the metrics registry. -func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { +// will cause the backend weight to be treated as the mean of the weights of +// the other backends. +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { w.mu.Lock() defer w.mu.Unlock() - - if recordMetrics { - defer func() { - endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality) - }() - } - - // The SubConn has not received a load report (i.e. just turned READY with - // no load report). - if w.lastUpdated == (time.Time{}) { - endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) - return 0 - } - // If the most recent update was longer ago than the expiration period, // reset nonEmptySince so that we apply the blackout period again if we // start getting data again in the future, and return 0. if now.Sub(w.lastUpdated) >= weightExpirationPeriod { - if recordMetrics { - endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality) - } w.nonEmptySince = time.Time{} return 0 } - // If we don't have at least blackoutPeriod worth of data, return 0. if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { - if recordMetrics { - endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) - } return 0 } - return w.weightVal } diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go index 56aa15da10d2..f389678b4e82 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -31,17 +31,13 @@ type scheduler interface { // len(scWeights)-1 are zero or there is only a single subconn, otherwise it // will return an Earliest Deadline First (EDF) scheduler implementation that // selects the subchannels according to their weights. 
-func (p *picker) newScheduler(recordMetrics bool) scheduler { - scWeights := p.scWeights(recordMetrics) +func newScheduler(scWeights []float64, inc func() uint32) scheduler { n := len(scWeights) if n == 0 { return nil } if n == 1 { - if recordMetrics { - rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) - } - return &rrScheduler{numSCs: 1, inc: p.inc} + return &rrScheduler{numSCs: 1, inc: inc} } sum := float64(0) numZero := 0 @@ -55,12 +51,8 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler { numZero++ } } - if numZero >= n-1 { - if recordMetrics { - rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) - } - return &rrScheduler{numSCs: uint32(n), inc: p.inc} + return &rrScheduler{numSCs: uint32(n), inc: inc} } unscaledMean := sum / float64(n-numZero) scalingFactor := maxWeight / max @@ -82,11 +74,11 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler { } if allEqual { - return &rrScheduler{numSCs: uint32(n), inc: p.inc} + return &rrScheduler{numSCs: uint32(n), inc: inc} } logger.Infof("using edf scheduler with weights: %v", weights) - return &edfScheduler{weights: weights, inc: p.inc} + return &edfScheduler{weights: weights, inc: inc} } const maxWeight = math.MaxUint16 diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go index bcc8aca8b491..27279257ed13 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr } // Start starts the aggregator. It can be called after Stop to restart the -// aggregator. +// aggretator. func (wbsa *Aggregator) Start() { wbsa.mu.Lock() defer wbsa.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index dfd1ef26dcb0..220f4e555674 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -84,17 +84,6 @@ type weightedTargetBalancer struct { targets map[string]Target } -type localityKeyType string - -const localityKey = localityKeyType("locality") - -// LocalityFromResolverState returns the locality from the resolver.State -// provided, or an empty string if not present. -func LocalityFromResolverState(state resolver.State) string { - locality, _ := state.Attributes.Value(localityKey).(string) - return locality -} - // UpdateClientConnState takes the new targets in balancer group, // creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. 
@@ -153,7 +142,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, - Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), + Attributes: s.ResolverState.Attributes, }, BalancerConfig: newT.ChildPolicy.Config, }) diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 6561b769ebf7..4161fdf47a8b 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,15 +25,12 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -82,7 +79,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, - MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -96,7 +92,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - uccs := func(ctx context.Context) { + ok := ccb.serializer.Schedule(func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -111,23 +107,17 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err + }) + if !ok { + return nil } - onFailure := func() { close(errCh) } - - // UpdateClientConnState can race with Close, and when the latter wins, the - // serializer is closed, and the attempt to schedule the callback will fail. - // It is acceptable to ignore this failure. But since we want to handle the - // state update in a blocking fashion (when we successfully schedule the - // callback), we have to use the ScheduleOr method and not the MaybeSchedule - // method on the serializer. - ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.TrySchedule(func(ctx context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -143,7 +133,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.TrySchedule(func(context.Context) { + ccb.serializer.Schedule(func(context.Context) { if ccb.balancer == nil { return } @@ -155,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. 
func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.TrySchedule(func(ctx context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -262,29 +252,15 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { - acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} - if s == connectivity.Ready { - setConnectedAddress(&scs, curAddr) - } - acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index fcd1cfe80245..63c639e4fe93 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3fdd5..423be7b43b00 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "slices" "strings" "sync" "sync/atomic" @@ -40,7 +39,6 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -196,11 +194,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) - cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) - return cc, nil } @@ -595,14 +590,13 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. 
- parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -632,6 +626,11 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -646,6 +645,11 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -808,11 +812,17 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice. Addresses are passed during -// subconn creation and address update operations. -func copyAddresses(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - copy(out, in) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } return out } @@ -825,14 +835,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
} ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddressesWithoutBalancerAttributes(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -908,29 +918,28 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } + ac.mu.Unlock() - ac.resetTransportAndUnlock() + ac.resetTransport() return nil } -// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. -// This is different from the Equal method on the resolver.Address type which -// considers all fields to determine equality. Here, we only consider fields -// that are meaningful to the subConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) && - a.Metadata == b.Metadata -} - -func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { - return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddresses(addrs) + addrs = copyAddressesWithoutBalancerAttributes(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -938,7 +947,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { + if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -957,7 +966,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { + if a.Equal(ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -983,9 +992,11 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } + ac.mu.Unlock() + // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransportAndUnlock() + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -1179,8 +1190,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. 
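Editor's note on the clientconn.go hunks above: the downgrade goes back to stripping BalancerAttributes in copyAddressesWithoutBalancerAttributes so that the stock resolver.Address.Equal comparison in equalAddresses behaves as intended (newer grpc keeps the attributes and compares with equalAddressIgnoringBalAttributes instead). A small, hypothetical illustration of why the field has to be cleared before comparing:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	a := resolver.Address{Addr: "10.0.0.1:9095"}
	b := a
	// Balancer-internal metadata that should not affect "same address" checks.
	b.BalancerAttributes = attributes.New("weight", 3)

	fmt.Println(a.Equal(b)) // false: Equal compares every field, including BalancerAttributes

	b.BalancerAttributes = nil // what the copy helper does before storing and comparing
	fmt.Println(a.Equal(b))    // true
}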
resetBackoff chan struct{} @@ -1193,6 +1204,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1200,7 +1214,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, ac.curAddr, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1217,10 +1231,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -// resetTransportAndUnlock unconditionally connects the addrConn. -// -// ac.mu must be held by the caller, and this function will guarantee it is released. -func (ac *addrConn) resetTransportAndUnlock() { +func (ac *addrConn) resetTransport() { + ac.mu.Lock() acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1510,7 +1522,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan + t, state, sc := ac.transport, ac.state, ac.stateChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1573,7 +1585,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancellation of cc.ctx. + // closing of transports is also taken care of by cancelation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index e840858b77b1..411e3dfd47cc 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,73 +21,18 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" - "google.golang.org/grpc/mem" ) -// baseCodec captures the new encoding.CodecV2 interface without the Name -// function, allowing it to be implemented by older Codec and encoding.Codec -// implementations. The omitted Name function is only needed for the register in -// the encoding package and is not part of the core functionality. +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v any) (mem.BufferSlice, error) - Unmarshal(data mem.BufferSlice, v any) error -} - -// getCodec returns an encoding.CodecV2 for the codec of the given name (if -// registered). Initially checks the V2 registry with encoding.GetCodecV2 and -// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry -// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge -// to turn it into an encoding.CodecV2. Returns nil otherwise. 
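Editor's note on the codec.go hunk in progress here: baseCodec (and with it registered encoding.Codec implementations) reverts to the []byte-based Marshal/Unmarshal pair, dropping the mem.BufferSlice plumbing and the V1/V2 bridge. A minimal, hypothetical codec satisfying that reverted shape; the "rawproto" name and the use of durationpb for the round-trip are illustrative only:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/encoding"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

// rawProtoCodec is a hypothetical codec implementing the reverted interface:
// Marshal/Unmarshal over plain byte slices plus a Name for registration.
type rawProtoCodec struct{}

func (rawProtoCodec) Marshal(v any) ([]byte, error) {
	msg, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("expected proto.Message, got %T", v)
	}
	return proto.Marshal(msg)
}

func (rawProtoCodec) Unmarshal(data []byte, v any) error {
	msg, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("expected proto.Message, got %T", v)
	}
	return proto.Unmarshal(data, msg)
}

func (rawProtoCodec) Name() string { return "rawproto" }

func init() {
	// Registration happens at init time; lookup is by lowercase content-subtype.
	encoding.RegisterCodec(rawProtoCodec{})
}

func main() {
	data, err := rawProtoCodec{}.Marshal(durationpb.New(3 * time.Second))
	if err != nil {
		panic(err)
	}
	var out durationpb.Duration
	if err := rawProtoCodec{}.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 3s
}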
-func getCodec(name string) encoding.CodecV2 { - if codecV1 := encoding.GetCodec(name); codecV1 != nil { - return newCodecV1Bridge(codecV1) - } - - return encoding.GetCodecV2(name) -} - -func newCodecV0Bridge(c Codec) baseCodec { - return codecV0Bridge{codec: c} -} - -func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { - return codecV1Bridge{ - codecV0Bridge: codecV0Bridge{codec: c}, - name: c.Name(), - } -} - -var _ baseCodec = codecV0Bridge{} - -type codecV0Bridge struct { - codec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error - } -} - -func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { - data, err := c.codec.Marshal(v) - if err != nil { - return nil, err - } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil -} - -func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { - return c.codec.Unmarshal(data.Materialize(), v) -} - -var _ encoding.CodecV2 = codecV1Bridge{} - -type codecV1Bridge struct { - codecV0Bridge - name string + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } -func (c codecV1Bridge) Name() string { - return c.name -} +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go index 7e4bfee88861..43726e877b8b 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remaining 12 bytes are used as a random mask for +// for HKDF-expand and the remainining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go index b5bbb5497aa3..6a9035ea254f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remaining 12 bytes are used +// are used as a key for HKDF-expand and the remainining 12 bytes are used // as a random mask for the counter. 
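Editor's note: the 44-byte key layout described in the comments above (32 bytes for HKDF-expand plus a 12-byte counter mask) is easy to get wrong; a minimal, hypothetical helper showing the split and the length check:

package main

import "fmt"

// splitRekeyKey mirrors the layout from the comment above: a 44-byte input
// splits into a 32-byte HKDF-expand key and a 12-byte counter mask.
func splitRekeyKey(key []byte) (kdfKey, counterMask []byte, err error) {
	if len(key) != 44 {
		return nil, nil, fmt.Errorf("unexpected key length %d, want 44", len(key))
	}
	return key[:32], key[32:], nil
}

func main() {
	kdfKey, mask, err := splitRekeyKey(make([]byte, 44))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(kdfKey), len(mask)) // 32 12
}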
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index b3af03590729..e1cdafb980cd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -34,6 +34,8 @@ var ( // to a corresponding connection to a hypervisor handshaker service // instance. hsConnMap = make(map[string]*grpc.ClientConn) + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has @@ -48,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index f478990dfbb5..38cb5cf0d744 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/gcp/altscontext.proto package grpc_gcp diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 377723f2f034..55fc7f65f10d 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -533,7 +533,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakeReq messages. + // that the peer's out_frames are split into multiple HandshakReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. 
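Editor's note: the restored hsDialer variable exists purely as a test seam, as its comment says. A hypothetical sketch of how a test might use it; the package clause, stub address, and test name are assumptions for illustration:

package service

import (
	"testing"

	"google.golang.org/grpc"
)

func TestDialUsesInjectedDialer(t *testing.T) {
	// Swap the package-level dialer and restore it when the test ends.
	orig := hsDialer
	t.Cleanup(func() { hsDialer = orig })

	var dialed string
	hsDialer = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
		dialed = target
		return nil, nil // a real test would return a stubbed *grpc.ClientConn
	}

	if _, err := Dial("example-handshaker:8080"); err != nil {
		t.Fatalf("Dial: %v", err)
	}
	if dialed != "example-handshaker:8080" {
		t.Fatalf("dialed %q, want example-handshaker:8080", dialed)
	}
}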
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 34443b1d2dcf..358074b64946 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.27.1 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility. +// for forward compatibility type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,18 +87,14 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedHandshakerServiceServer struct{} +// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. +type UnimplementedHandshakerServiceServer struct { +} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} -func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -108,13 +104,6 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { - // If the following call panics, it indicates UnimplementedHandshakerServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 9f2ffc8ab860..18cc9cfbd599 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/gcp/transport_security_common.proto package grpc_gcp diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 27c1b9bb63f2..f5453d48a53f 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -61,7 +60,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithBufferPool = withBufferPool + internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -93,6 +92,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -677,11 +677,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, - BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,8 +758,25 @@ func WithMaxCallAttempts(n int) DialOption { }) } -func withBufferPool(bufferPool mem.BufferPool) DialOption { +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.BufferPool = bufferPool + o.recvBufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index e7b532b6f806..0022859ad746 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./scripts/regenerate.sh +//go:generate ./regenerate.sh /* Package grpc implements an RPC system called gRPC. 
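Editor's note: for readers wiring this up from application code, a hypothetical client-side sketch of the restored receive-buffer-pool option; the target and credentials are placeholders, and experimental.WithRecvBufferPool (added later in this diff) configures the same knob without the deprecation notice:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient(
		"dns:///ingester.example:9095", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}

As the restored doc comment notes, the pool is ignored when stats handlers, tracing, or binary logging are in use, and it is not recommended together with compression.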
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae142c42..5ebf88d7147f 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]any) +var registeredCodecs = make(map[string]Codec) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,6 +126,5 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - c, _ := registeredCodecs[contentSubtype].(Codec) - return c + return registeredCodecs[contentSubtype] } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go deleted file mode 100644 index 074c5e234a7b..000000000000 --- a/vendor/google.golang.org/grpc/encoding/encoding_v2.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package encoding - -import ( - "strings" - - "google.golang.org/grpc/mem" -) - -// CodecV2 defines the interface gRPC uses to encode and decode messages. Note -// that implementations of this interface must be thread safe; a CodecV2's -// methods can be called from concurrent goroutines. -type CodecV2 interface { - // Marshal returns the wire format of v. The buffers in the returned - // [mem.BufferSlice] must have at least one reference each, which will be freed - // by gRPC when they are no longer needed. - Marshal(v any) (out mem.BufferSlice, err error) - // Unmarshal parses the wire format into v. Note that data will be freed as soon - // as this function returns. If the codec wishes to guarantee access to the data - // after this function, it must take its own reference that it frees when it is - // no longer needed. - Unmarshal(data mem.BufferSlice, v any) error - // Name returns the name of the Codec implementation. The returned string - // will be used as part of content type in transmission. The result must be - // static; the result cannot change between calls. - Name() string -} - -// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and -// servers. -// -// The CodecV2 will be stored and looked up by result of its Name() method, which -// should match the content-subtype of the encoding handled by the CodecV2. This -// is case-insensitive, and is stored and looked up as lowercase. If the -// result of calling Name() is an empty string, RegisterCodecV2 will panic. See -// Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If both a Codec and CodecV2 are registered with the same name, the CodecV2 -// will be used. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. 
If multiple Codecs are -// registered with the same name, the one registered last will take effect. -func RegisterCodecV2(codec CodecV2) { - if codec == nil { - panic("cannot register a nil CodecV2") - } - if codec.Name() == "" { - panic("cannot register CodecV2 with empty string result for Name()") - } - contentSubtype := strings.ToLower(codec.Name()) - registeredCodecs[contentSubtype] = codec -} - -// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is -// registered for the content-subtype. -// -// The content-subtype is expected to be lowercase. -func GetCodecV2(contentSubtype string) CodecV2 { - c, _ := registeredCodecs[contentSubtype].(CodecV2) - return c -} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index ceec319dd2fb..66d5cdf03ec5 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2024 gRPC authors. + * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ import ( "fmt" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -33,51 +32,28 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodecV2(&codecV2{}) + encoding.RegisterCodec(codec{}) } -// codec is a CodecV2 implementation with protobuf. It is the default codec for -// gRPC. -type codecV2 struct{} +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} -func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { +func (codec) Marshal(v any) ([]byte, error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - size := proto.Size(vv) - if mem.IsBelowBufferPoolingThreshold(size) { - buf, err := proto.Marshal(vv) - if err != nil { - return nil, err - } - data = append(data, mem.SliceBuffer(buf)) - } else { - pool := mem.DefaultBufferPool() - buf := pool.Get(size) - if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { - pool.Put(buf) - return nil, err - } - data = append(data, mem.NewBuffer(buf, pool)) - } - - return data, nil + return proto.Marshal(vv) } -func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { +func (codec) Unmarshal(data []byte, v any) error { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) - defer buf.Free() - // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not - // really possible without a major overhaul of the proto package, but the - // vtprotobuf library may be able to support this. 
- return proto.Unmarshal(buf.ReadOnlyData(), vv) + return proto.Unmarshal(data, vv) } func messageV2Of(v any) proto.Message { @@ -91,6 +67,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (c *codecV2) Name() string { +func (codec) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go new file mode 100644 index 000000000000..de7f13a2210e --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/experimental.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package experimental is a collection of experimental features that might +// have some rough edges to them. Housing experimental features in this package +// results in a user accessing these APIs as `experimental.Foo`, thereby making +// it explicit that the feature is experimental and using them in production +// code is at their own risk. +// +// All APIs in this package are experimental. +package experimental + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/internal" +) + +// WithRecvBufferPool returns a grpc.DialOption that configures the use of +// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: WithStatsHandler, EnableTracing, or binary +// logging. In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. +func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption { + return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool) +} + +// RecvBufferPool returns a grpc.ServerOption that configures the server to use +// the provided shared buffer pool for parsing incoming messages. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: StatsHandler, EnableTracing, or binary logging. +// In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. 
+func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption { + return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool) +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go deleted file mode 100644 index 930140f57ed9..000000000000 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "maps" - "testing" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" -) - -func init() { - internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting -} - -var logger = grpclog.Component("metrics-registry") - -// DefaultMetrics are the default metrics registered through global metrics -// registry. This is written to at initialization time only, and is read only -// after initialization. -var DefaultMetrics = NewMetrics() - -// MetricDescriptor is the data for a registered metric. -type MetricDescriptor struct { - // The name of this metric. This name must be unique across the whole binary - // (including any per call metrics). See - // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions - // for metric naming conventions. - Name Metric - // The description of this metric. - Description string - // The unit (e.g. entries, seconds) of this metric. - Unit string - // The required label keys for this metric. These are intended to - // metrics emitted from a stats handler. - Labels []string - // The optional label keys for this metric. These are intended to attached - // to metrics emitted from a stats handler if configured. - OptionalLabels []string - // Whether this metric is on by default. - Default bool - // The type of metric. This is set by the metric registry, and not intended - // to be set by a component registering a metric. - Type MetricType - // Bounds are the bounds of this metric. This only applies to histogram - // metrics. If unset or set with length 0, stats handlers will fall back to - // default bounds. - Bounds []float64 -} - -// MetricType is the type of metric. -type MetricType int - -// Type of metric supported by this instrument registry. -const ( - MetricTypeIntCount MetricType = iota - MetricTypeFloatCount - MetricTypeIntHisto - MetricTypeFloatHisto - MetricTypeIntGauge -) - -// Int64CountHandle is a typed handle for a int count metric. This handle -// is passed at the recording point in order to know which metric to record -// on. -type Int64CountHandle MetricDescriptor - -// Descriptor returns the int64 count handle typecast to a pointer to a -// MetricDescriptor. -func (h *Int64CountHandle) Descriptor() *MetricDescriptor { - return (*MetricDescriptor)(h) -} - -// Record records the int64 count value on the metrics recorder provided. 
-func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { - recorder.RecordInt64Count(h, incr, labels...) -} - -// Float64CountHandle is a typed handle for a float count metric. This handle is -// passed at the recording point in order to know which metric to record on. -type Float64CountHandle MetricDescriptor - -// Descriptor returns the float64 count handle typecast to a pointer to a -// MetricDescriptor. -func (h *Float64CountHandle) Descriptor() *MetricDescriptor { - return (*MetricDescriptor)(h) -} - -// Record records the float64 count value on the metrics recorder provided. -func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { - recorder.RecordFloat64Count(h, incr, labels...) -} - -// Int64HistoHandle is a typed handle for an int histogram metric. This handle -// is passed at the recording point in order to know which metric to record on. -type Int64HistoHandle MetricDescriptor - -// Descriptor returns the int64 histo handle typecast to a pointer to a -// MetricDescriptor. -func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { - return (*MetricDescriptor)(h) -} - -// Record records the int64 histo value on the metrics recorder provided. -func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { - recorder.RecordInt64Histo(h, incr, labels...) -} - -// Float64HistoHandle is a typed handle for a float histogram metric. This -// handle is passed at the recording point in order to know which metric to -// record on. -type Float64HistoHandle MetricDescriptor - -// Descriptor returns the float64 histo handle typecast to a pointer to a -// MetricDescriptor. -func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { - return (*MetricDescriptor)(h) -} - -// Record records the float64 histo value on the metrics recorder provided. -func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { - recorder.RecordFloat64Histo(h, incr, labels...) -} - -// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is -// passed at the recording point in order to know which metric to record on. -type Int64GaugeHandle MetricDescriptor - -// Descriptor returns the int64 gauge handle typecast to a pointer to a -// MetricDescriptor. -func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { - return (*MetricDescriptor)(h) -} - -// Record records the int64 histo value on the metrics recorder provided. -func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { - recorder.RecordInt64Gauge(h, incr, labels...) -} - -// registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) - -// metricsRegistry contains all of the registered metrics. -// -// This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) - -// DescriptorForMetric returns the MetricDescriptor from the global registry. -// -// Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] -} - -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) - } - registeredMetrics[name] = true - if def { - DefaultMetrics = DefaultMetrics.Add(name) - } -} - -// RegisterInt64Count registers the metric description onto the global registry. -// It returns a typed handle to use to recording data. 
-// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple metrics are -// registered with the same name, this function will panic. -func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { - registerMetric(descriptor.Name, descriptor.Default) - descriptor.Type = MetricTypeIntCount - descPtr := &descriptor - metricsRegistry[descriptor.Name] = descPtr - return (*Int64CountHandle)(descPtr) -} - -// RegisterFloat64Count registers the metric description onto the global -// registry. It returns a typed handle to use to recording data. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple metrics are -// registered with the same name, this function will panic. -func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { - registerMetric(descriptor.Name, descriptor.Default) - descriptor.Type = MetricTypeFloatCount - descPtr := &descriptor - metricsRegistry[descriptor.Name] = descPtr - return (*Float64CountHandle)(descPtr) -} - -// RegisterInt64Histo registers the metric description onto the global registry. -// It returns a typed handle to use to recording data. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple metrics are -// registered with the same name, this function will panic. -func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { - registerMetric(descriptor.Name, descriptor.Default) - descriptor.Type = MetricTypeIntHisto - descPtr := &descriptor - metricsRegistry[descriptor.Name] = descPtr - return (*Int64HistoHandle)(descPtr) -} - -// RegisterFloat64Histo registers the metric description onto the global -// registry. It returns a typed handle to use to recording data. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple metrics are -// registered with the same name, this function will panic. -func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { - registerMetric(descriptor.Name, descriptor.Default) - descriptor.Type = MetricTypeFloatHisto - descPtr := &descriptor - metricsRegistry[descriptor.Name] = descPtr - return (*Float64HistoHandle)(descPtr) -} - -// RegisterInt64Gauge registers the metric description onto the global registry. -// It returns a typed handle to use to recording data. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple metrics are -// registered with the same name, this function will panic. -func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { - registerMetric(descriptor.Name, descriptor.Default) - descriptor.Type = MetricTypeIntGauge - descPtr := &descriptor - metricsRegistry[descriptor.Name] = descPtr - return (*Int64GaugeHandle)(descPtr) -} - -// snapshotMetricsRegistryForTesting snapshots the global data of the metrics -// registry. Registers a cleanup function on the provided testing.T that sets -// the metrics registry to its original state. Only called in testing functions. 
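Editor's note on the file being deleted here: the experimental/stats registry gives components a register-at-init, record-via-handle workflow. A hypothetical sketch of that pattern, only buildable against grpc releases that ship this package, so it illustrates what the downgrade gives up rather than code to add to this tree; the metric name and labels are made up:

package example

import estats "google.golang.org/grpc/experimental/stats"

// pickCount is registered once at package initialization; duplicate names are
// rejected by the registry.
var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "example.picker.picks",
	Description: "Number of picks performed.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordPick records one pick against whatever MetricsRecorder the channel
// hands the component at runtime.
func recordPick(recorder estats.MetricsRecorder, target string) {
	pickCount.Record(recorder, 1, target)
}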
-func snapshotMetricsRegistryForTesting(t *testing.T) { - oldDefaultMetrics := DefaultMetrics - oldRegisteredMetrics := registeredMetrics - oldMetricsRegistry := metricsRegistry - - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) - maps.Copy(registeredMetrics, registeredMetrics) - maps.Copy(metricsRegistry, metricsRegistry) - - t.Cleanup(func() { - DefaultMetrics = oldDefaultMetrics - registeredMetrics = oldRegisteredMetrics - metricsRegistry = oldMetricsRegistry - }) -} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go deleted file mode 100644 index 3221f7a633a3..000000000000 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package stats contains experimental metrics/stats API's. -package stats - -import "maps" - -// MetricsRecorder records on metrics derived from metric registry. -type MetricsRecorder interface { - // RecordInt64Count records the measurement alongside labels on the int - // count associated with the provided handle. - RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) - // RecordFloat64Count records the measurement alongside labels on the float - // count associated with the provided handle. - RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) - // RecordInt64Histo records the measurement alongside labels on the int - // histo associated with the provided handle. - RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) - // RecordFloat64Histo records the measurement alongside labels on the float - // histo associated with the provided handle. - RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) - // RecordInt64Gauge records the measurement alongside labels on the int - // gauge associated with the provided handle. - RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) -} - -// Metric is an identifier for a metric. -type Metric string - -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} - -// NewMetrics returns a Metrics containing Metrics. -func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. 
-func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } -} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index f1ae080dcb81..ac73c9ced255 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,6 +20,8 @@ package grpclog import ( "fmt" + + "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -31,22 +33,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - InfoDepth(depth+1, args...) + grpclog.InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - WarningDepth(depth+1, args...) + grpclog.WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - ErrorDepth(depth+1, args...) + grpclog.ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - FatalDepth(depth+1, args...) + grpclog.FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index db320105e64e..16928c9cb993 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,15 +18,18 @@ // Package grpclog defines logging for grpc. // -// In the default logger, severity level can be set by environment variable -// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by -// GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. 
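Editor's note on the grpclog hunks above and below: component loggers are available in both versions and simply prefix output with the component name, gated by the severity and verbosity environment variables mentioned in the package comment. A small, hypothetical usage sketch:

package main

import "google.golang.org/grpc/grpclog"

// logger prefixes every record with "[example-component]".
var logger = grpclog.Component("example-component")

func main() {
	logger.Error("component logger ready")
	if logger.V(2) { // verbosity is controlled by GRPC_GO_LOG_VERBOSITY_LEVEL
		logger.Info("verbose details")
	}
}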
+package grpclog // import "google.golang.org/grpc/grpclog" import ( "os" - "google.golang.org/grpc/grpclog/internal" + "google.golang.org/grpc/internal/grpclog" ) func init() { @@ -35,58 +38,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return internal.LoggerV2Impl.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...any) { - internal.LoggerV2Impl.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - internal.LoggerV2Impl.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - internal.LoggerV2Impl.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - internal.LoggerV2Impl.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - internal.LoggerV2Impl.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - internal.LoggerV2Impl.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - internal.LoggerV2Impl.Error(args...) + grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - internal.LoggerV2Impl.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - internal.LoggerV2Impl.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - internal.LoggerV2Impl.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -94,15 +97,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - internal.LoggerV2Impl.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calls os.Exit() with exit code 1. +// It calle os.Exit()) with exit code 1. func Fatalln(args ...any) { - internal.LoggerV2Impl.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -111,76 +114,19 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - internal.LoggerV2Impl.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - internal.LoggerV2Impl.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - internal.LoggerV2Impl.Infoln(args...) 
-} - -// InfoDepth logs to the INFO log at the specified depth. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func InfoDepth(depth int, args ...any) { - if internal.DepthLoggerV2Impl != nil { - internal.DepthLoggerV2Impl.InfoDepth(depth, args...) - } else { - internal.LoggerV2Impl.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WarningDepth(depth int, args ...any) { - if internal.DepthLoggerV2Impl != nil { - internal.DepthLoggerV2Impl.WarningDepth(depth, args...) - } else { - internal.LoggerV2Impl.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ErrorDepth(depth int, args ...any) { - if internal.DepthLoggerV2Impl != nil { - internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) - } else { - internal.LoggerV2Impl.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func FatalDepth(depth int, args ...any) { - if internal.DepthLoggerV2Impl != nil { - internal.DepthLoggerV2Impl.FatalDepth(depth, args...) - } else { - internal.LoggerV2Impl.Fatalln(args...) - } - os.Exit(1) + grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go deleted file mode 100644 index 59c03bc14c2a..000000000000 --- a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains functionality internal to the grpclog package. -package internal - -// LoggerV2Impl is the logger used for the non-depth log functions. -var LoggerV2Impl LoggerV2 - -// DepthLoggerV2Impl is the logger used for the depth log functions. -var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go deleted file mode 100644 index 0d9a824ce1ba..000000000000 --- a/vendor/google.golang.org/grpc/grpclog/internal/logger.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package internal - -// Logger mimics golang's standard Logger as an interface. -// -// Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} - -// LoggerWrapper wraps Logger into a LoggerV2. -type LoggerWrapper struct { - Logger -} - -// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. -func (l *LoggerWrapper) Info(args ...any) { - l.Logger.Print(args...) -} - -// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. -func (l *LoggerWrapper) Infoln(args ...any) { - l.Logger.Println(args...) -} - -// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. -func (l *LoggerWrapper) Infof(format string, args ...any) { - l.Logger.Printf(format, args...) -} - -// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. -func (l *LoggerWrapper) Warning(args ...any) { - l.Logger.Print(args...) -} - -// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. -func (l *LoggerWrapper) Warningln(args ...any) { - l.Logger.Println(args...) -} - -// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. -func (l *LoggerWrapper) Warningf(format string, args ...any) { - l.Logger.Printf(format, args...) -} - -// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. -func (l *LoggerWrapper) Error(args ...any) { - l.Logger.Print(args...) -} - -// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. -func (l *LoggerWrapper) Errorln(args ...any) { - l.Logger.Println(args...) -} - -// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. -func (l *LoggerWrapper) Errorf(format string, args ...any) { - l.Logger.Printf(format, args...) -} - -// V reports whether verbosity level l is at least the requested verbose level. -func (*LoggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true -} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 4b203585707a..b1674d8267ca 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,17 +18,70 @@ package grpclog -import "google.golang.org/grpc/grpclog/internal" +import "google.golang.org/grpc/internal/grpclog" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger internal.Logger +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. 
+type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...any) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...any) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...any) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 892dc13d164b..ecfd36d71303 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,16 +19,52 @@ package grpclog import ( + "encoding/json" + "fmt" "io" + "log" "os" "strconv" "strings" - "google.golang.org/grpc/grpclog/internal" + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 internal.LoggerV2 +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. 
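The wrapper above only adapts the deprecated Logger interface; applications normally install a LoggerV2 directly through SetLoggerV2. A minimal sketch under that assumption, using only the public constructors declared in this file (the writer choices are arbitrary):

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

// quietLogger reuses a stock LoggerV2 for output but suppresses all
// verbosity-gated messages by overriding V.
type quietLogger struct {
	grpclog.LoggerV2
}

func (quietLogger) V(int) bool { return false }

func init() {
	// SetLoggerV2 is not mutex-protected, so install the logger before any
	// gRPC activity. Discarding the info and warning writers leaves only
	// ERROR/FATAL output on stderr, given the fan-out NewLoggerV2 applies.
	grpclog.SetLoggerV2(quietLogger{grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr)})
}

func main() {}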
@@ -36,8 +72,34 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - internal.LoggerV2Impl = l - internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -46,13 +108,32 @@ func SetLoggerV2(l LoggerV2) { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -80,10 +161,80 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ - Verbosity: v, - FormatJSON: jsonFormat, + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. 
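Note the writer fan-out that newLoggerV2WithConfig builds above: the warning logger is a MultiWriter over the info and warning writers, and the error/fatal logger writes to all three. A small sketch that makes the cascade visible (buffer names are illustrative):

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc/grpclog"
)

func main() {
	var infoBuf, warnBuf, errBuf bytes.Buffer
	l := grpclog.NewLoggerV2(&infoBuf, &warnBuf, &errBuf)

	l.Warning("slow request")
	l.Error("request failed")

	fmt.Println(infoBuf.Len() > 0) // true: the info writer also receives warnings and errors
	fmt.Println(warnBuf.Len() > 0) // true: the warning writer also receives errors
	fmt.Println(errBuf.Len() > 0)  // true: only the error lands here
}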
+ b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -94,4 +245,14 @@ func newLoggerV2() LoggerV2 { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 internal.DepthLoggerV2 +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index e65cf0ea15e4..38b883507350 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab4927e..51b736ba06e5 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.27.1 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} + x := &healthWatchClient{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,12 +110,26 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility. +// for forward compatibility // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -146,23 +160,19 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error + Watch(*HealthCheckRequest, Health_WatchServer) error } -// UnimplementedHealthServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. 
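The client-side change above only renames the stream type: grpc.ServerStreamingClient[HealthCheckResponse] becomes the named Health_WatchClient interface again, and Recv keeps the same shape, so calling code compiles unchanged. A hedged sketch of a Watch consumer (the target is a placeholder; an empty service name asks for the server's overall health):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	cc, err := grpc.NewClient("dns:///loki-ingester:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	stream, err := grpc_health_v1.NewHealthClient(cc).Watch(context.Background(),
		&grpc_health_v1.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv() // stream is a Health_WatchClient
		if err != nil {
			log.Fatal(err)
		}
		log.Println("serving status:", resp.GetStatus())
	}
}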
-type UnimplementedHealthServer struct{} +// UnimplementedHealthServer should be embedded to have forward compatible implementations. +type UnimplementedHealthServer struct { +} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } -func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -172,13 +182,6 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { - // If the following call panics, it indicates UnimplementedHealthServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&Health_ServiceDesc, srv) } @@ -205,11 +208,21 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) + return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 31c9cdc9d026..5496b99dd5c4 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -99,7 +99,7 @@ func (sbc *subBalancerWrapper) startBalancer() { if sbc.balancer == nil { sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) } - sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id) + sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id) sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { sbc.balancer.UpdateClientConnState(*sbc.ccState) @@ -121,11 +121,14 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) sbc.ccState = &s b := sbc.balancer if b == nil { - // A sub-balancer is closed when it is removed from the group or the - // group is closed as a whole, and is not expected to receive updates - // after that. 
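Server-side, the regenerated code drops the generic grpc.ServerStreamingServer type and the embed-by-value guidance, but embedding UnimplementedHealthServer by value remains the forward-compatible pattern. A minimal hand-rolled implementation as a sketch; real services typically register google.golang.org/grpc/health.NewServer() instead:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

type healthServer struct {
	grpc_health_v1.UnimplementedHealthServer // embed by value for forward compatibility
}

func (healthServer) Check(context.Context, *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
	return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
}

func (healthServer) Watch(_ *grpc_health_v1.HealthCheckRequest, stream grpc_health_v1.Health_WatchServer) error {
	// Send one status update; a real implementation would keep streaming on changes.
	return stream.Send(&grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING})
}

func main() {
	s := grpc.NewServer()
	grpc_health_v1.RegisterHealthServer(s, healthServer{})
	// s.Serve(lis) would follow in a real server.
}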
But when used with the priority LB policy a sub-balancer - // (and the whole balancer group) could be closed because it's the lower - // priority, but it can still get address updates. + // This sub-balancer was closed. This should never happen because + // sub-balancers are closed when the locality is removed from EDS, or + // the balancer group is closed. There should be no further address + // updates when either of this happened. + // + // This will be a common case with priority support, because a + // sub-balancer (and the whole balancer group) could be closed because + // it's the lower priority, but it can still get address updates. return nil } return b.UpdateClientConnState(s) @@ -134,11 +137,14 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) func (sbc *subBalancerWrapper) resolverError(err error) { b := sbc.balancer if b == nil { - // A sub-balancer is closed when it is removed from the group or the - // group is closed as a whole, and is not expected to receive updates - // after that. But when used with the priority LB policy a sub-balancer - // (and the whole balancer group) could be closed because it's the lower - // priority, but it can still get address updates. + // This sub-balancer was closed. This should never happen because + // sub-balancers are closed when the locality is removed from EDS, or + // the balancer group is closed. There should be no further address + // updates when either of this happened. + // + // This will be a common case with priority support, because a + // sub-balancer (and the whole balancer group) could be closed because + // it's the lower priority, but it can still get address updates. return } b.ResolverError(err) @@ -204,7 +210,7 @@ type BalancerGroup struct { // after it's closed. // // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer - // may call back to balancer group inline. It causes deadlock if they + // may call back to balancer group inline. It causes deaclock if they // require the same mutex). // // We should never need to hold multiple locks at the same time in this @@ -218,7 +224,7 @@ type BalancerGroup struct { // guards the map from SubConn to balancer ID, so updateSubConnState needs // to hold it shortly to potentially delete from the map. // - // UpdateState is called by the balancer state aggregator, and it will + // UpdateState is called by the balancer state aggretator, and it will // decide when and whether to call. // // The corresponding boolean incomingStarted is used to stop further updates @@ -292,11 +298,11 @@ func (bg *BalancerGroup) Start() { // AddWithClientConn adds a balancer with the given id to the group. The // balancer is built with a balancer builder registered with balancerName. The // given ClientConn is passed to the newly built balancer instead of the -// one passed to balancergroup.New(). +// onepassed to balancergroup.New(). // // TODO: Get rid of the existing Add() API and replace it with this. func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { - bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id) + bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id) builder := balancer.Get(balancerName) if builder == nil { return fmt.Errorf("unregistered balancer name %q", balancerName) @@ -312,7 +318,7 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. 
if bg.outgoingStarted && bg.deletedBalancerCache != nil { if old, ok := bg.deletedBalancerCache.Remove(id); ok { if bg.logger.V(2) { - bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id) + bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -366,13 +372,13 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { - bg.logger.Infof("Removing child policy for child %q", id) + bg.logger.Infof("Removing child policy for locality %q", id) bg.outgoingMu.Lock() sbToRemove, ok := bg.idToBalancerConfig[id] if !ok { - bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id) + bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id) bg.outgoingMu.Unlock() return } @@ -388,13 +394,13 @@ func (bg *BalancerGroup) Remove(id string) { if bg.deletedBalancerCache != nil { if bg.logger.V(2) { - bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) + bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } bg.deletedBalancerCache.Add(id, sbToRemove, func() { if bg.logger.V(2) { - bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) + bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -535,7 +541,7 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver // aggregator will create an aggregated picker and an aggregated connectivity // state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { - bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state) + bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) // Send new state to the aggregator, without holding the incomingMu. // incomingMu is to protect all calls to the parent ClientConn, this update diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index bb531225d5f4..dfe18b08925d 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided into two categories with respect to +// Methods of channelMap can be divided in two two categories with respect to // locking. // // 1. Methods acquire the global lock. 
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 00abc7c2beb0..d90648713944 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -46,10 +46,6 @@ var ( // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7617be215895..7f7044e1731c 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithBufferPool is implemented by the grpc package and returns a dial + // WithRecvBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // BufferPool is implemented by the grpc package and returns a server + // RecvBufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go similarity index 52% rename from vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go rename to vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 07df71e98a87..bfc45102ab24 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -1,6 +1,6 @@ /* * - * Copyright 2024 gRPC authors. + * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,59 @@ * */ -package internal +// Package grpclog (internal) defines depth logging for grpc. +package grpclog import ( - "encoding/json" - "fmt" - "io" - "log" "os" ) +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) 
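The internal/experimental.go hunk above restores the WithRecvBufferPool / RecvBufferPool hooks that grpc-go 1.65 exposes through its experimental package (1.66 replaced them with the mem.BufferPool plumbing). A hedged sketch of the dial-side usage this revert re-enables; the target is a placeholder and the option is, as the package name says, experimental:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/experimental"
)

func main() {
	cc, err := grpc.NewClient("dns:///example-backend:9095",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Reuse receive buffers across RPCs instead of allocating per message.
		experimental.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}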
+ } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + // LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -65,13 +107,14 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -81,124 +124,3 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, - }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v -} - -// LoggerV2Config configures the LoggerV2 implementation. -type LoggerV2Config struct { - // Verbosity sets the verbosity level of the logger. - Verbosity int - // FormatJSON controls whether the logger should output logs in JSON format. - FormatJSON bool -} - -// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. -// The infoW, warningW, and errorW writers are used to write log messages of -// different severity levels. -func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.FormatJSON { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} -} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 092ad187a2c8..faa998de7632 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -16,21 +16,17 @@ * */ -// Package grpclog provides logging functionality for internal gRPC packages, -// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" - - "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. 
type PrefixLogger struct { - logger grpclog.DepthLoggerV2 + logger DepthLoggerV2 prefix string } @@ -42,7 +38,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) + InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -52,7 +48,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) + WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -62,18 +58,36 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) + ErrorDepth(1, fmt.Sprintf(format, args...)) } -// V reports whether verbosity level l is at least the requested verbose level. -func (pl *PrefixLogger) V(l int) bool { +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + if !Logger.V(2) { + return + } if pl != nil { - return pl.logger.V(l) + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return } - return true + InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d639275a..f7f40a16acee 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,28 +53,16 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be -// executed in the order it was added. This is a best-effort operation. If the -// context passed to NewCallbackSerializer was canceled before this method is -// called, the callback will not be scheduled. +// Schedule adds a callback to be scheduled after existing callbacks are run. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. -func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { - cs.callbacks.Put(f) -} - -// ScheduleOr schedules the provided callback function f to be executed in the -// order it was added. 
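PrefixLogger lives under grpc-go's internal tree, so only gRPC's own packages can import it; the sketch below (component name, prefix and types invented) shows the construction pattern those packages use, matching the restored Debugf/V behaviour that consults the global verbosity:

package examplelb // hypothetical package inside google.golang.org/grpc

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

var logger = grpclog.Component("example-lb")

type balancerish struct {
	logger *internalgrpclog.PrefixLogger
}

func newBalancerish() *balancerish {
	b := &balancerish{}
	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[example-lb %p] ", b))
	b.logger.Infof("created child policy %q", "round_robin")
	b.logger.Debugf("initial picker state: %v", nil) // emitted only when verbosity >= 2
	return b
}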
If the context passed to NewCallbackSerializer has been -// canceled before this method is called, the onFailure callback will be -// executed inline instead. // -// Callbacks are expected to honor the context when performing any blocking -// operations, and should return early when the context is canceled. -func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { - if cs.callbacks.Put(f) != nil { - onFailure() - } +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index 6d8c2f518dff..aef8cec1ab0c 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.TrySchedule(func(context.Context) { + ps.cs.Schedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.TrySchedule(func(context.Context) { + ps.cs.Schedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 65f936a623aa..5d6653986923 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -208,27 +208,6 @@ var ( // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - - // ConnectedAddress returns the connected address for a SubConnState. The - // address is only valid if the state is READY. - ConnectedAddress any // func (scs SubConnState) resolver.Address - - // SetConnectedAddress sets the connected address for a SubConnState. - SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) - - // SnapshotMetricRegistryForTesting snapshots the global data of the metric - // registry. Registers a cleanup function on the provided testing.T that - // sets the metric registry to its original state. Only called in testing - // functions. - SnapshotMetricRegistryForTesting any // func(t *testing.T) - - // SetDefaultBufferPoolForTesting updates the default buffer pool, for - // testing purposes. - SetDefaultBufferPoolForTesting any // func(mem.BufferPool) - - // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for - // testing purposes. 
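The revert above narrows the serializer API back to a single Schedule method whose boolean result reports whether the callback was queued; queuing is impossible once the serializer's context has been canceled. A short sketch of that calling convention, framed as it would appear inside grpc-go since grpcsync is internal:

package example // hypothetical package inside google.golang.org/grpc

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cs := grpcsync.NewCallbackSerializer(ctx)
	if !cs.Schedule(func(ctx context.Context) {
		// Runs after previously scheduled callbacks, on a single goroutine.
		// Blocking work should watch ctx and return early once it is canceled.
	}) {
		// The serializer's context was already canceled; the callback was dropped.
		return
	}
}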
- SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 98a0d4771f59..3244718625cb 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index a211bc3fc68a..c42cb8cba0c2 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.1 -// protoc v5.27.1 +// protoc v4.25.2 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 23dcb2100c3d..5c7a25efd840 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.27.1 +// - protoc-gen-go-grpc v1.4.0 +// - protoc v4.25.2 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -64,25 +64,21 @@ func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLoo // RouteLookupServiceServer is the server API for RouteLookupService service. // All implementations must embed UnimplementedRouteLookupServiceServer -// for forward compatibility. +// for forward compatibility type RouteLookupServiceServer interface { // Lookup returns a target for a single key. RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) mustEmbedUnimplementedRouteLookupServiceServer() } -// UnimplementedRouteLookupServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedRouteLookupServiceServer struct{} +// UnimplementedRouteLookupServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRouteLookupServiceServer struct { +} func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") } func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} -func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {} // UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to RouteLookupServiceServer will @@ -92,13 +88,6 @@ type UnsafeRouteLookupServiceServer interface { } func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { - // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&RouteLookupService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go deleted file mode 100644 index be110d41f9a4..000000000000 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package stats - -import ( - "fmt" - - estats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/stats" -) - -// MetricsRecorderList forwards Record calls to all of its metricsRecorders. -// -// It eats any record calls where the label values provided do not match the -// number of label keys. -type MetricsRecorderList struct { - // metricsRecorders are the metrics recorders this list will forward to. - metricsRecorders []estats.MetricsRecorder -} - -// NewMetricsRecorderList creates a new metric recorder list with all the stats -// handlers provided which implement the MetricsRecorder interface. -// If no stats handlers provided implement the MetricsRecorder interface, -// the MetricsRecorder list returned is a no-op. -func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { - var mrs []estats.MetricsRecorder - for _, sh := range shs { - if mr, ok := sh.(estats.MetricsRecorder); ok { - mrs = append(mrs, mr) - } - } - return &MetricsRecorderList{ - metricsRecorders: mrs, - } -} - -func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { - if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { - panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) - } -} - -func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { - verifyLabels(handle.Descriptor(), labels...) - - for _, metricRecorder := range l.metricsRecorders { - metricRecorder.RecordInt64Count(handle, incr, labels...) - } -} - -func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { - verifyLabels(handle.Descriptor(), labels...) - - for _, metricRecorder := range l.metricsRecorders { - metricRecorder.RecordFloat64Count(handle, incr, labels...) 
- } -} - -func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { - verifyLabels(handle.Descriptor(), labels...) - - for _, metricRecorder := range l.metricsRecorders { - metricRecorder.RecordInt64Histo(handle, incr, labels...) - } -} - -func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { - verifyLabels(handle.Descriptor(), labels...) - - for _, metricRecorder := range l.metricsRecorders { - metricRecorder.RecordFloat64Histo(handle, incr, labels...) - } -} - -func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { - verifyLabels(handle.Descriptor(), labels...) - - for _, metricRecorder := range l.metricsRecorders { - metricRecorder.RecordInt64Gauge(handle, incr, labels...) - } -} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 7e7aaa546368..078137b7fd70 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keepalive interval and time parameters. + // the TCP keealive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index d5c1085eeaec..fd7d43a8907b 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keepalive interval and time parameters. + // the TCP keealive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ea0633bbdab8..3deadfb4a20c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,7 +32,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -149,9 +148,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - reader mem.Reader + d []byte // onEachWrite is called every time - // a part of data is written out. + // a part of d is written out. onEachWrite func() } @@ -290,22 +289,18 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. 
-// -// Information is passed as specific struct types called control frames. A -// control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. It -// shouldn't be confused with an HTTP2 frame, although some of the control -// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - wakeupCh chan struct{} // Unblocks readers waiting for something to read. - done <-chan struct{} // Closed when the transport is done. - - // Mutex guards all the fields below, except trfChan which can be read - // atomically without holding mu. + ch chan struct{} + done <-chan struct{} mu sync.Mutex - consumerWaiting bool // True when readers are blocked waiting for new data. - closed bool // True when the controlbuf is finished. - list *itemList // List of queued control frames. + consumerWaiting bool + list *itemList + err error // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -313,59 +308,47 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Pointer[chan struct{}] + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - wakeupCh: make(chan struct{}, 1), - list: &itemList{}, - done: done, + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many frames in the control buf that -// represent the response of an action initiated by the peer, like -// incomingSettings cleanupStreams etc. +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. func (c *controlBuffer) throttle() { - if ch := c.trfChan.Load(); ch != nil { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { select { - case <-(*ch): + case <-ch: case <-c.done: } } } -// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } -// executeAndPut runs f, and if the return value is true, adds the given item to -// the controlbuf. The item could be nil, in which case, this method simply -// executes f and does not add the item to the controlbuf. -// -// The first return value indicates whether the item was successfully added to -// the control buffer. A non-nil error, specifically ErrConnClosing, is returned -// if the control buffer is already closed. 
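The restored comments describe the same throttling scheme as before: once transportResponseFrames reaches the threshold, a channel is published in trfChan and writers block in throttle() until the reader drains the backlog and closes it. A generic sketch of that pattern, with invented names, for readers unfamiliar with it:

package example

import (
	"sync"
	"sync/atomic"
)

const maxQueuedResponses = 50

// throttledQueue blocks producers once too many "response" items are queued.
type throttledQueue struct {
	mu     sync.Mutex
	queued int
	gate   atomic.Pointer[chan struct{}]
}

// throttle blocks while the gate is published, i.e. while the backlog is full.
func (q *throttledQueue) throttle() {
	if ch := q.gate.Load(); ch != nil {
		<-*ch
	}
}

func (q *throttledQueue) enqueueResponse() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.queued++
	if q.queued == maxQueuedResponses {
		ch := make(chan struct{})
		q.gate.Store(&ch) // producers calling throttle() now block
	}
}

func (q *throttledQueue) dequeueResponse() {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.queued == maxQueuedResponses {
		if ch := q.gate.Swap(nil); ch != nil {
			close(*ch) // release blocked producers
		}
	}
	q.queued--
}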
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { + var wakeUp bool c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { - return false, ErrConnClosing + if c.err != nil { + c.mu.Unlock() + return false, c.err } if f != nil { if !f() { // f wasn't successful + c.mu.Unlock() return false, nil } } - if it == nil { - return true, nil - } - - var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -376,102 +359,98 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } + c.mu.Unlock() if wakeUp { select { - case c.wakeupCh <- struct{}{}: + case c.ch <- struct{}{}: default: } } return true, nil } -// get returns the next control frame from the control buffer. If block is true -// **and** there are no control frames in the control buffer, the call blocks -// until one of the conditions is met: there is a frame to return or the -// transport is closed. +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - frame, err := c.getOnceLocked() - if frame != nil || err != nil || !block { - // If we read a frame or an error, we can return to the caller. The - // call to getOnceLocked() returns a nil frame and a nil error if - // there is nothing to read, and in that case, if the caller asked - // us not to block, we can return now as well. + if c.err != nil { c.mu.Unlock() - return frame, err + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil } c.consumerWaiting = true c.mu.Unlock() - - // Release the lock above and wait to be woken up. select { - case <-c.wakeupCh: + case <-c.ch: case <-c.done: return nil, errors.New("transport closed by client") } } } -// Callers must not use this method, but should instead use get(). -// -// Caller must hold c.mu. -func (c *controlBuffer) getOnceLocked() (any, error) { - if c.closed { - return false, ErrConnClosing - } - if c.list.isEmpty() { - return nil, nil - } - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Swap(nil) - close(*ch) - } - c.transportResponseFrames-- - } - return h, nil -} - -// finish closes the control buffer, cleaning up any streams that have queued -// header frames. Once this method returns, no more frames can be added to the -// control buffer, and attempts to do so will return ErrConnClosing. 
func (c *controlBuffer) finish() { c.mu.Lock() - defer c.mu.Unlock() - - if c.closed { + if c.err != nil { + c.mu.Unlock() return } - c.closed = true + c.err = ErrConnClosing // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - switch v := head.it.(type) { - case *headerFrame: - if v.onOrphaned != nil { // It will be nil on the server-side. - v.onOrphaned(ErrConnClosing) - } - case *dataFrame: - _ = v.reader.Close() + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) } } - // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch := c.trfChan.Swap(nil) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { - close(*ch) + close(ch) } + c.trfChan.Store((chan struct{})(nil)) + c.mu.Unlock() } type side int @@ -487,7 +466,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resembling a round-robin scheduling over all streams. While +// thereby closely resemebling to a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -511,13 +490,12 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger - bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -533,7 +511,6 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, - bufferPool: bufferPool, } return l } @@ -791,11 +768,6 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() - for head := str.itl.dequeueAll(); head != nil; head = head.next { - if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() - } - } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -931,18 +903,16 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and data - // that is the actual message. As an optimization to keep wire traffic low, data - // from data is copied to h to make as big as the maximum possible HTTP2 frame - // size. 
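The processData hunks that follow restore the older write path in which each outgoing chunk is capped by the minimum of the HTTP/2 max frame size, the stream-level send quota, and the connection-level send quota, and the gRPC message header is coalesced with message bytes into a single frame. A rough standalone sketch of that size computation, under illustrative names and constants (not the vendored implementation):

package main

import "fmt"

const maxFrameLen = 16384 // illustrative stand-in for http2MaxFrameLen

// nextFrame returns how many header bytes and data bytes fit into one frame
// given the per-stream and per-connection quotas, coalescing header and data.
func nextFrame(header, data []byte, streamQuota, connQuota int) (hSize, dSize int) {
	maxSize := maxFrameLen
	if streamQuota < maxSize {
		maxSize = streamQuota // stream-level flow control
	}
	if connQuota < maxSize {
		maxSize = connQuota // connection-level flow control
	}
	if maxSize < 0 {
		maxSize = 0
	}
	hSize = min(maxSize, len(header))
	dSize = min(maxSize-hSize, len(data))
	return hSize, dSize
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	header := make([]byte, 5)   // 5-byte gRPC message header
	data := make([]byte, 40000) // message larger than one frame
	h, d := nextFrame(header, data, 30000, 1<<20)
	fmt.Printf("first frame carries %d header bytes and %d data bytes (%d total)\n", h, d, h+d)
	// The caller would then write header[:h] followed by data[:d] as one
	// DATA frame, advance both slices, and repeat on the next iteration.
}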
+ // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possible HTTP2 frame size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -957,7 +927,9 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - + var ( + buf []byte + ) // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -971,50 +943,43 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize - size := hSize + dSize - - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) + } else { + buf = dataItem.d } + size := hSize + dSize + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && remainingBytes == 0 { + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] - if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. 
str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index e1cd86b2fcee..4a3ddce29a4e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,6 +24,7 @@ package transport import ( + "bytes" "context" "errors" "fmt" @@ -39,7 +40,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,7 +98,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, - bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -172,8 +171,6 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger - - bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -247,7 +244,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() - defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -272,6 +268,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -333,28 +330,16 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { - // Always take a reference because otherwise there is no guarantee the data will - // be available after this function returns. This is what callers to Write - // expect. - data.Ref() +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { headersWritten := s.updateHeaderSent() - err := ht.do(func() { - defer data.Free() + return ht.do(func() { if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - for _, b := range data { - _, _ = ht.rw.Write(b.ReadOnlyData()) - } + ht.rw.Write(data) ht.rw.(http.Flusher).Flush() }) - if err != nil { - data.Free() - return err - } - return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -421,7 +406,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, windowHandler: func(int) {}, } @@ -430,19 +415,21 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - for { - buf := ht.bufferPool.Get(http2MaxFrameLen) - n, err := req.Body.Read(*buf) + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) if n > 0 { - *buf = (*buf)[:n] - s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) - } else { - ht.bufferPool.Put(buf) + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } + if len(buf) == 0 { + buf = make([]byte, readSize) + } } }() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f46194fdc62e..3c63c706986d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,7 +47,6 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -60,8 +59,6 @@ import ( // atomically. var clientConnectionCounter uint64 -var goAwayLoopyWriterTimeout = 5 * time.Second - var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -147,7 +144,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool mem.BufferPool + bufferPool *bufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -232,7 +229,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancellation + // The following defer and goroutine monitor the connectCtx for cancelation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -349,7 +346,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: opts.BufferPool, + bufferPool: newBufferPool(), onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -466,7 +463,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -507,6 +504,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, + freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -985,7 +983,6 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { - t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1009,20 +1006,10 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It - // also waits for loopyWriter to be closed with a timer to avoid the - // long blocking in case the connection is blackholed, i.e. TCP is - // just stuck. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - timer := time.NewTimer(goAwayLoopyWriterTimeout) - defer timer.Stop() - select { - case <-t.writerDone: // success - case <-timer.C: - t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) - } + <-t.writerDone t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1078,36 +1065,27 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { - reader := data.Reader() - +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + d: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. 
- if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } } - if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() - return err - } - return nil + return t.controlBuf.put(df) } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1212,13 +1190,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) } } // The server has closed the stream without sending trailers. Record that @@ -1247,7 +1222,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancellation. Alter the status code accordingly. + // of this cancelation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1332,7 +1307,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f5163f770c8d..b7091165b501 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,7 +39,6 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" - "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -120,7 +119,7 @@ type http2Server struct { // Fields below are for channelz metric collection. 
channelz *channelz.Socket - bufferPool mem.BufferPool + bufferPool *bufferPool connectionID uint64 @@ -262,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: config.BufferPool, + bufferPool: newBufferPool(), } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -331,7 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -614,9 +613,10 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,13 +813,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) } } if f.StreamEnded() { @@ -1092,9 +1089,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.executeAndPut(func() bool { - return t.checkForHeaderListSize(trailingHeader) - }, nil) + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { return err @@ -1117,37 +1112,27 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { - reader := data.Reader() - +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - _ = reader.Close() return err } } else { // Writing headers checks for this condition. 
if s.getState() == streamDone { - _ = reader.Close() return t.streamContextErr(s) } } - df := &dataFrame{ streamID: s.id, h: hdr, - reader: reader, + d: data, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return t.streamContextErr(s) } - if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() - return err - } - return nil + return t.controlBuf.put(df) } // keepalive running in a separate goroutine does the following: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index f609c6c66595..39cef3bd442e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,32 +317,28 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (int, error) { +func (w *bufWriter) Write(b []byte) (n int, err error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err := w.conn.Write(b) + n, err = w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } - written := 0 for len(b) > 0 { - copied := copy(w.buf[w.offset:], b) - b = b[copied:] - written += copied - w.offset += copied - if w.offset < w.batchSize { - continue - } - if err := w.flushKeepBuffer(); err != nil { - return written, err + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.flushKeepBuffer() } } - return written, nil + return n, err } func (w *bufWriter) Flush() error { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 54b224436544..24fa1032574c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,14 +107,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - // The buffer could contain extra bytes from the target server, so we can't - // discard it. However, in many cases where the server waits for the client - // to send the first message (e.g. when TLS is being used), the buffer will - // be empty, so we can avoid the overhead of reading through this buffer. - if r.Buffered() != 0 { - return &bufConn{Conn: conn, r: r}, nil - } - return conn, nil + + return &bufConn{Conn: conn, r: r}, nil } // proxyDial dials, connecting to a proxy first if necessary. 
Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86cc15..4b39c0ade97c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,6 +22,7 @@ package transport import ( + "bytes" "context" "errors" "fmt" @@ -36,7 +37,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,10 +47,32 @@ import ( const logLevel = 2 +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer mem.Buffer + buffer *bytes.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -80,9 +102,6 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { - // drop the buffer on the floor. Since b.err is not nil, any subsequent reads - // will always return an error, making this buffer inaccessible. - r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -129,97 +148,45 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. + last *bytes.Buffer // Stores the remaining data in the previous calls. err error + freeBuffer func(*bytes.Buffer) } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - n, r.last = mem.ReadUnsafe(header, r.last) - return n, nil - } - if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) - } else { - n, r.err = r.readHeader(header) - } - return n, r.err -} - -// Read reads the next n bytes from last. If last is drained, it tries to read -// additional data from recv. It blocks if there no additional data available in -// recv. If Read returns any non-nil error, it will continue to return that -// error. -func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { - if r.err != nil { - return nil, r.err - } - if r.last != nil { - buf = r.last - if r.last.Len() > n { - buf, r.last = mem.SplitUnsafe(buf, n) - } else { + // Read remaining data left in last call. 
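The bufferPool type reintroduced in this transport.go hunk is a thin wrapper around sync.Pool holding *bytes.Buffer values: callers get a buffer, Reset it before use, fill it with the incoming payload, and the consumer returns it to the pool once drained. A small usage sketch of that pattern, standalone rather than wired into the transport:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

type bufferPool struct {
	pool sync.Pool
}

func newBufferPool() *bufferPool {
	return &bufferPool{pool: sync.Pool{New: func() any { return new(bytes.Buffer) }}}
}

func (p *bufferPool) get() *bytes.Buffer  { return p.pool.Get().(*bytes.Buffer) }
func (p *bufferPool) put(b *bytes.Buffer) { p.pool.Put(b) }

func main() {
	pool := newBufferPool()

	// Producer side: copy an incoming frame payload into a pooled buffer.
	payload := []byte("incoming DATA frame bytes")
	buf := pool.get()
	buf.Reset() // a recycled buffer may still hold old bytes
	buf.Write(payload)

	// Consumer side: drain the buffer, then hand it back for reuse.
	out := make([]byte, buf.Len())
	copy(out, buf.Bytes())
	pool.put(buf)

	fmt.Printf("consumed %d bytes: %q\n", len(out), out)
}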
+ copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) r.last = nil } - return buf, nil + return copied, nil } if r.closeStream != nil { - buf, r.err = r.readClient(n) + n, r.err = r.readClient(p) } else { - buf, r.err = r.read(n) + n, r.err = r.read(p) } - return buf, r.err + return n, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) read(p []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) - } -} - -func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { - select { - case <-r.ctxDone: - return nil, ContextErr(r.ctx.Err()) - case m := <-r.recv.get(): - return r.readAdditional(m, n) - } -} - -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { - // If the context is canceled, then closes the stream with nil metadata. - // closeStream writes its error parameter to r.recv as a recvMsg. - // r.readAdditional acts on that message and returns the necessary error. - select { - case <-r.ctxDone: - // Note that this adds the ctx error to the end of recv buffer, and - // reads from the head. This will delay the error until recv buffer is - // empty, thus will delay ctx cancellation in Recv(). - // - // It's done this way to fix a race between ctx cancel and trailer. The - // race was, stream.Recv() may return ctx error if ctxDone wins the - // race, but stream.Trailer() may return a non-nil md because the stream - // was not marked as done when trailer is received. This closeStream - // call will mark stream as done, thus fix the race. - // - // TODO: delaying ctx error seems like a unnecessary side effect. What - // we really want is to mark the stream as done, and return ctx error - // faster. - r.closeStream(ContextErr(r.ctx.Err())) - m := <-r.recv.get() - return r.readHeaderAdditional(m, header) - case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readAdditional(m, p) } } -func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -240,40 +207,25 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, n) + return r.readAdditional(m, p) case m := <-r.recv.get(): - return r.readAdditional(m, n) + return r.readAdditional(m, p) } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { r.recv.load() if m.err != nil { - if m.buffer != nil { - m.buffer.Free() - } return 0, m.err } - - n, r.last = mem.ReadUnsafe(header, m.buffer) - - return n, nil -} - -func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { - r.recv.load() - if m.err != nil { - if m.buffer != nil { - m.buffer.Free() - } - return nil, m.err - } - - if m.buffer.Len() > n { - m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer } - - return m.buffer, nil + return copied, nil } type streamState uint32 @@ -289,7 +241,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream + ct *http2Client // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -299,7 +251,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader *transportReader + trReader io.Reader fc *inFlow wq *writeQuota @@ -456,7 +408,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metadata. Note that if it is not called +// Trailer returns the cached trailer metedata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -547,87 +499,36 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -func (s *Stream) ReadHeader(header []byte) (err error) { - // Don't request a read if there was an error earlier - if er := s.trReader.er; er != nil { - return er - } - s.requestRead(len(header)) - for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) - header = header[n:] - if len(header) == 0 { - err = nil - } - if err != nil { - if n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - } - return nil -} - -// Read reads n bytes from the wire for this stream. -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +// Read reads all p bytes from the wire for this stream. 
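Stream.Read in the hunk below delegates to io.ReadFull over a small io.Reader wrapper that records the first error it sees and reports consumed bytes to a flow-control window handler. A self-contained sketch of that wrapper, with the window handler reduced to a counter (illustrative, not the vendored transportReader):

package main

import (
	"fmt"
	"io"
	"strings"
)

// windowedReader wraps an io.Reader, remembers the first error, and notifies
// a window handler about every chunk of bytes handed to the caller.
type windowedReader struct {
	reader        io.Reader
	windowHandler func(int)
	er            error
}

func (w *windowedReader) Read(p []byte) (int, error) {
	if w.er != nil {
		return 0, w.er
	}
	n, err := w.reader.Read(p)
	if err != nil {
		w.er = err
		return n, err
	}
	w.windowHandler(n)
	return n, nil
}

func main() {
	var credited int
	wr := &windowedReader{
		reader:        strings.NewReader("hello, flow control"),
		windowHandler: func(n int) { credited += n }, // e.g. trigger WINDOW_UPDATE bookkeeping
	}

	buf := make([]byte, 5)
	if _, err := io.ReadFull(wr, buf); err != nil { // read exactly len(buf) bytes
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("read %q, window credited %d bytes\n", buf, credited)
}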
+func (s *Stream) Read(p []byte) (n int, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.er; er != nil { - return nil, er + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er } - s.requestRead(n) - for n != 0 { - buf, err := s.trReader.Read(n) - var bufLen int - if buf != nil { - bufLen = buf.Len() - } - n -= bufLen - if n == 0 { - err = nil - } - if err != nil { - if bufLen > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - data.Free() - return nil, err - } - data = append(data, buf) - } - return data, nil + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) } -// transportReader reads all the data available for this Stream from the transport and +// tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader *recvBufferReader + reader io.Reader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) - if err != nil { - t.er = err - return 0, err - } - t.windowHandler(len(header)) - return n, nil -} - -func (t *transportReader) Read(n int) (mem.Buffer, error) { - buf, err := t.reader.Read(n) +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) if err != nil { t.er = err - return buf, err + return } - t.windowHandler(buf.Len()) - return buf, nil + t.windowHandler(n) + return } // BytesReceived indicates whether any bytes have been received on this stream. @@ -673,7 +574,6 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 - BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -712,8 +612,6 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool - // The mem.BufferPool to use when reading/writing to the wire. - BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -775,7 +673,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -827,7 +725,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -900,7 +798,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indicate application + // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go index 94aa375f83ec..b8b92a6cb550 100644 --- a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go @@ -24,29 +24,35 @@ import ( "bytes" "encoding/json" "fmt" - "maps" "net/url" "os" - "slices" "strings" - "sync" "google.golang.org/grpc" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/encoding/protojson" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( + // The "server_features" field in the bootstrap file contains a list of + // features supported by the server: + // - A value of "xds_v3" indicates that the server supports the v3 version of + // the xDS transport protocol. + // - A value of "ignore_resource_deletion" indicates that the client should + // ignore deletion of Listener and Cluster resources in updates from the + // server. + serverFeaturesV3 = "xds_v3" serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" - clientFeatureResourceWrapper = "xds.config.resource-in-sotw" + + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" ) // For overriding in unit tests. @@ -54,15 +60,12 @@ var bootstrapFileReadFunc = os.ReadFile // ChannelCreds contains the credentials to be used while communicating with an // xDS server. It is also used to dedup servers with the same server URI. -// -// This type does not implement custom JSON marshal/unmarshal logic because it -// is straightforward to accomplish the same with json struct tags. type ChannelCreds struct { // Type contains a unique name identifying the credentials type. The only // supported types currently are "google_default" and "insecure". - Type string `json:"type,omitempty"` + Type string // Config contains the JSON configuration associated with the credentials. - Config json.RawMessage `json:"config,omitempty"` + Config json.RawMessage } // Equal reports whether cc and other are considered equal. @@ -84,158 +87,50 @@ func (cc ChannelCreds) String() string { return cc.Type + "-" + string(b) } -// ServerConfigs represents a collection of server configurations. -type ServerConfigs []*ServerConfig - -// Equal returns true if scs equals other. -func (scs *ServerConfigs) Equal(other *ServerConfigs) bool { - if len(*scs) != len(*other) { - return false - } - for i := range *scs { - if !(*scs)[i].Equal((*other)[i]) { - return false - } - } - return true -} - -// UnmarshalJSON takes the json data (a list of server configurations) and -// unmarshals it to the struct. 
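The bootstrap hunks that follow go back to helpers that unmarshal the "xds_servers" JSON list and then use only the first entry. A minimal sketch of that parse-then-pick-first approach, using a stripped-down config type whose fields are illustrative rather than the full bootstrap schema:

package main

import (
	"encoding/json"
	"fmt"
)

// serverConfig is a stripped-down stand-in for the bootstrap ServerConfig.
type serverConfig struct {
	ServerURI      string   `json:"server_uri"`
	ServerFeatures []string `json:"server_features"`
}

// firstServerConfig unmarshals a JSON list of server configs and returns the
// first one, erroring out when the list is empty.
func firstServerConfig(data []byte) (*serverConfig, error) {
	var servers []*serverConfig
	if err := json.Unmarshal(data, &servers); err != nil {
		return nil, fmt.Errorf("failed to unmarshal server list: %v", err)
	}
	if len(servers) < 1 {
		return nil, fmt.Errorf("no management server found in JSON")
	}
	return servers[0], nil
}

func main() {
	data := []byte(`[
	  {"server_uri": "primary.example.com:443", "server_features": ["xds_v3"]},
	  {"server_uri": "fallback.example.com:443"}
	]`)
	sc, err := firstServerConfig(data)
	if err != nil {
		fmt.Println("bootstrap error:", err)
		return
	}
	fmt.Println("using management server:", sc.ServerURI)
}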
-func (scs *ServerConfigs) UnmarshalJSON(data []byte) error { - servers := []*ServerConfig{} - if err := json.Unmarshal(data, &servers); err != nil { - return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data)) - } - // Only use the first server config if fallback support is disabled. - if !envconfig.XDSFallbackSupport { - if len(servers) > 1 { - servers = servers[:1] - } - } - *scs = servers - return nil -} - -// Authority contains configuration for an xDS control plane authority. +// ServerConfig contains the configuration to connect to a server, including +// URI, creds, and transport API version (e.g. v2 or v3). // -// This type does not implement custom JSON marshal/unmarshal logic because it -// is straightforward to accomplish the same with json struct tags. -type Authority struct { - // ClientListenerResourceNameTemplate is template for the name of the - // Listener resource to subscribe to for a gRPC client channel. Used only - // when the channel is created using an "xds:" URI with this authority name. - // - // The token "%s", if present in this string, will be replaced - // with %-encoded service authority (i.e., the path part of the target - // URI used to create the gRPC channel). - // - // Must start with "xdstp:///". If it does not, - // that is considered a bootstrap file parsing error. - // - // If not present in the bootstrap file, defaults to - // "xdstp:///envoy.config.listener.v3.Listener/%s". - ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"` - // XDSServers contains the list of server configurations for this authority. - XDSServers ServerConfigs `json:"xds_servers,omitempty"` -} - -// Equal returns true if a equals other. -func (a *Authority) Equal(other *Authority) bool { - switch { - case a == nil && other == nil: - return true - case (a != nil) != (other != nil): - return false - case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate: - return false - case !a.XDSServers.Equal(&other.XDSServers): - return false - } - return true -} - -// ServerConfig contains the configuration to connect to a server. +// It contains unexported fields that are initialized when unmarshaled from JSON +// using either the UnmarshalJSON() method or the ServerConfigFromJSON() +// function. Hence users are strongly encouraged not to use a literal struct +// initialization to create an instance of this type, but instead unmarshal from +// JSON using one of the two available options. type ServerConfig struct { - serverURI string - channelCreds []ChannelCreds - serverFeatures []string + // ServerURI is the management server to connect to. + // + // The bootstrap file contains an ordered list of xDS servers to contact for + // this authority. The first one is picked. + ServerURI string + // Creds contains the credentials to be used while communicationg with this + // xDS server. It is also used to dedup servers with the same server URI. + Creds ChannelCreds + // ServerFeatures contains a list of features supported by this xDS server. + // It is also used to dedup servers with the same server URI and creds. + ServerFeatures []string // As part of unmarshalling the JSON config into this struct, we ensure that // the credentials config is valid by building an instance of the specified - // credentials and store it here for easy access. 
- selectedCreds ChannelCreds + // credentials and store it here as a grpc.DialOption for easy access when + // dialing this xDS server. credsDialOption grpc.DialOption - cleanups []func() -} - -// ServerURI returns the URI of the management server to connect to. -func (sc *ServerConfig) ServerURI() string { - return sc.serverURI -} - -// ChannelCreds returns the credentials configuration to use when communicating -// with this server. Also used to dedup servers with the same server URI. -func (sc *ServerConfig) ChannelCreds() []ChannelCreds { - return sc.channelCreds -} + // IgnoreResourceDeletion controls the behavior of the xDS client when the + // server deletes a previously sent Listener or Cluster resource. If set, the + // xDS client will not invoke the watchers' OnResourceDoesNotExist() method + // when a resource is deleted, nor will it remove the existing resource value + // from its cache. + IgnoreResourceDeletion bool -// ServerFeatures returns the list of features supported by this server. Also -// used to dedup servers with the same server URI and channel creds. -func (sc *ServerConfig) ServerFeatures() []string { - return sc.serverFeatures -} - -// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a -// feature where the xDS client can ignore resource deletions from this server, -// as described in gRFC A53. -// -// This feature controls the behavior of the xDS client when the server deletes -// a previously sent Listener or Cluster resource. If set, the xDS client will -// not invoke the watchers' OnResourceDoesNotExist() method when a resource is -// deleted, nor will it remove the existing resource value from its cache. -func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { - for _, sf := range sc.serverFeatures { - if sf == serverFeaturesIgnoreResourceDeletion { - return true - } - } - return false + // Cleanups are called when the xDS client for this server is closed. Allows + // cleaning up resources created specifically for this ServerConfig. + Cleanups []func() } -// CredsDialOption returns the first supported transport credentials from the -// configuration, as a dial option. +// CredsDialOption returns the configured credentials as a grpc dial option. func (sc *ServerConfig) CredsDialOption() grpc.DialOption { return sc.credsDialOption } -// Cleanups returns a collection of functions to be called when the xDS client -// for this server is closed. Allows cleaning up resources created specifically -// for this server. -func (sc *ServerConfig) Cleanups() []func() { - return sc.cleanups -} - -// Equal reports whether sc and other are considered equal. -func (sc *ServerConfig) Equal(other *ServerConfig) bool { - switch { - case sc == nil && other == nil: - return true - case (sc != nil) != (other != nil): - return false - case sc.serverURI != other.serverURI: - return false - case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }): - return false - case !slices.Equal(sc.serverFeatures, other.serverFeatures): - return false - case !sc.selectedCreds.Equal(other.selectedCreds): - return false - } - return true -} - // String returns the string representation of the ServerConfig. // // This string representation will be used as map keys in federation @@ -246,41 +141,38 @@ func (sc *ServerConfig) Equal(other *ServerConfig) bool { // content. It doesn't cover NodeProto because NodeProto isn't used by // federation. 
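As the comment above notes, the String() form that follows is used as a map key to dedup management servers in federation code paths: two configs with the same URI, credentials, and features should map to one client. A short sketch of keying a map on such a joined representation, with simplified fields (illustrative only):

package main

import (
	"fmt"
	"strings"
)

type server struct {
	uri      string
	creds    string
	features []string
}

// key joins the fields that matter for dedup into one stable string.
func (s server) key() string {
	if len(s.features) == 0 {
		return s.uri + "-" + s.creds
	}
	return strings.Join([]string{s.uri, s.creds, strings.Join(s.features, "-")}, "-")
}

func main() {
	servers := []server{
		{uri: "xds.example.com:443", creds: "google_default", features: []string{"ignore_resource_deletion"}},
		{uri: "xds.example.com:443", creds: "google_default", features: []string{"ignore_resource_deletion"}},
		{uri: "xds.example.com:443", creds: "insecure"},
	}
	seen := map[string]bool{}
	for _, s := range servers {
		if seen[s.key()] {
			continue // same URI, creds, and features: reuse the existing client
		}
		seen[s.key()] = true
		fmt.Println("creating client for", s.key())
	}
}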
func (sc *ServerConfig) String() string { - if len(sc.serverFeatures) == 0 { - return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String()) - } - features := strings.Join(sc.serverFeatures, "-") - return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-") -} - -// The following fields correspond 1:1 with the JSON schema for ServerConfig. -type serverConfigJSON struct { - ServerURI string `json:"server_uri,omitempty"` - ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"` - ServerFeatures []string `json:"server_features,omitempty"` + features := strings.Join(sc.ServerFeatures, "-") + return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") } -// MarshalJSON returns marshaled JSON bytes corresponding to this server config. -func (sc *ServerConfig) MarshalJSON() ([]byte, error) { - server := &serverConfigJSON{ - ServerURI: sc.serverURI, - ChannelCreds: sc.channelCreds, - ServerFeatures: sc.serverFeatures, +// MarshalJSON marshals the ServerConfig to json. +func (sc ServerConfig) MarshalJSON() ([]byte, error) { + server := xdsServer{ + ServerURI: sc.ServerURI, + ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, + ServerFeatures: sc.ServerFeatures, + } + server.ServerFeatures = []string{serverFeaturesV3} + if sc.IgnoreResourceDeletion { + server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) } return json.Marshal(server) } // UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. func (sc *ServerConfig) UnmarshalJSON(data []byte) error { - server := serverConfigJSON{} + var server xdsServer if err := json.Unmarshal(data, &server); err != nil { - return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data)) + return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) } - sc.serverURI = server.ServerURI - sc.channelCreds = server.ChannelCreds - sc.serverFeatures = server.ServerFeatures - + sc.ServerURI = server.ServerURI + sc.ServerFeatures = server.ServerFeatures + for _, f := range server.ServerFeatures { + if f == serverFeaturesIgnoreResourceDeletion { + sc.IgnoreResourceDeletion = true + } + } for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. c := bootstrap.GetCredentials(cc.Type) @@ -291,536 +183,351 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.selectedCreds = cc + sc.Creds = ChannelCreds(cc) sc.credsDialOption = grpc.WithCredentialsBundle(bundle) - sc.cleanups = append(sc.cleanups, cancel) + sc.Cleanups = append(sc.Cleanups, cancel) break } - if sc.serverURI == "" { - return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data)) - } - if sc.credsDialOption == nil { - return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data)) - } return nil } -// ServerConfigTestingOptions specifies options for creating a new ServerConfig -// for testing purposes. -// -// # Testing-Only -type ServerConfigTestingOptions struct { - // URI is the name of the server corresponding to this server config. - URI string - // ChannelCreds contains a list of channel credentials to use when talking - // to this server. If unspecified, `insecure` credentials will be used. 
- ChannelCreds []ChannelCreds - // ServerFeatures represents the list of features supported by this server. - ServerFeatures []string -} - -// ServerConfigForTesting creates a new ServerConfig from the passed in options, -// for testing purposes. -// -// # Testing-Only -func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) { - cc := opts.ChannelCreds - if cc == nil { - cc = []ChannelCreds{{Type: "insecure"}} - } - scInternal := &serverConfigJSON{ - ServerURI: opts.URI, - ChannelCreds: cc, - ServerFeatures: opts.ServerFeatures, - } - scJSON, err := json.Marshal(scInternal) - if err != nil { - return nil, err - } - +// ServerConfigFromJSON creates a new ServerConfig from the given JSON +// configuration. This is the preferred way of creating a ServerConfig when +// hand-crafting the JSON configuration. +func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { sc := new(ServerConfig) - if err := sc.UnmarshalJSON(scJSON); err != nil { + if err := sc.UnmarshalJSON(data); err != nil { return nil, err } return sc, nil } -// Config is the internal representation of the bootstrap configuration provided -// to the xDS client. -type Config struct { - xDSServers ServerConfigs - cpcs map[string]certproviderNameAndConfig - serverListenerResourceNameTemplate string - clientDefaultListenerResourceNameTemplate string - authorities map[string]*Authority - node node - - // A map from certprovider instance names to parsed buildable configs. - certProviderConfigs map[string]*certprovider.BuildableConfig -} - -// XDSServers returns the top-level list of management servers to connect to, -// ordered by priority. -func (c *Config) XDSServers() []*ServerConfig { - return c.xDSServers -} - -// CertProviderConfigs returns a map from certificate provider plugin instance -// name to their configuration. Callers must not modify the returned map. -func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig { - return c.certProviderConfigs -} - -// ServerListenerResourceNameTemplate returns template for the name of the -// Listener resource to subscribe to for a gRPC server. -// -// If starts with "xdstp:", will be interpreted as a new-style name, -// in which case the authority of the URI will be used to select the -// relevant configuration in the "authorities" map. -// -// The token "%s", if present in this string, will be replaced with the IP -// and port on which the server is listening. (e.g., "0.0.0.0:8080", -// "[::]:8080"). For example, a value of "example/resource/%s" could become -// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", -// the replaced string will be %-encoded. -// -// There is no default; if unset, xDS-based server creation fails. -func (c *Config) ServerListenerResourceNameTemplate() string { - return c.serverListenerResourceNameTemplate -} - -// ClientDefaultListenerResourceNameTemplate returns a template for the name of -// the Listener resource to subscribe to for a gRPC client channel. Used only -// when the channel is created with an "xds:" URI with no authority. -// -// If starts with "xdstp:", will be interpreted as a new-style name, -// in which case the authority of the URI will be used to select the -// relevant configuration in the "authorities" map. -// -// The token "%s", if present in this string, will be replaced with -// the service authority (i.e., the path part of the target URI -// used to create the gRPC channel). If the template starts with -// "xdstp:", the replaced string will be %-encoded. 
-// -// Defaults to "%s". -func (c *Config) ClientDefaultListenerResourceNameTemplate() string { - return c.clientDefaultListenerResourceNameTemplate -} - -// Authorities returns a map of authority name to corresponding configuration. -// Callers must not modify the returned map. -// -// This is used in the following cases: -// - A gRPC client channel is created using an "xds:" URI that includes -// an authority. -// - A gRPC client channel is created using an "xds:" URI with no -// authority, but the "client_default_listener_resource_name_template" -// field above turns it into an "xdstp:" URI. -// - A gRPC server is created and the -// "server_listener_resource_name_template" field is an "xdstp:" URI. -// -// In any of those cases, it is an error if the specified authority is -// not present in this map. -func (c *Config) Authorities() map[string]*Authority { - return c.authorities -} - -// Node returns xDS a v3 Node proto corresponding to the node field in the -// bootstrap configuration, which identifies a specific gRPC instance. -func (c *Config) Node() *v3corepb.Node { - return c.node.toProto() -} - -// Equal returns true if c equals other. -func (c *Config) Equal(other *Config) bool { +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { switch { - case c == nil && other == nil: + case sc == nil && other == nil: return true - case (c != nil) != (other != nil): - return false - case !c.xDSServers.Equal(&other.xDSServers): - return false - case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }): - return false - case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate: + case (sc != nil) != (other != nil): return false - case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate: + case sc.ServerURI != other.ServerURI: return false - case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }): + case !sc.Creds.Equal(other.Creds): return false - case !c.node.Equal(other.node): + case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): return false } return true } -// String returns a string representation of the Config. -func (c *Config) String() string { - s, _ := c.MarshalJSON() - return string(s) -} - -// The following fields correspond 1:1 with the JSON schema for Config. -type configJSON struct { - XDSServers ServerConfigs `json:"xds_servers,omitempty"` - CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"` - ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` - ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` - Authorities map[string]*Authority `json:"authorities,omitempty"` - Node node `json:"node,omitempty"` -} - -// MarshalJSON returns marshaled JSON bytes corresponding to this config. 
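The removed Config marshalling below used a private mirror struct whose fields carry json tags matching the bootstrap schema 1:1, so the exported type can keep unexported fields while (un)marshalling stays declarative and defaults can be pre-populated. A compact sketch of that mirror-struct technique; the types and field names here are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// config keeps its state unexported; JSON round-trips go through configJSON.
type config struct {
	serverURI string
	template  string
}

// configJSON mirrors the JSON schema 1:1 via struct tags.
type configJSON struct {
	ServerURI string `json:"server_uri,omitempty"`
	Template  string `json:"client_default_listener_resource_name_template,omitempty"`
}

func (c *config) MarshalJSON() ([]byte, error) {
	return json.MarshalIndent(&configJSON{ServerURI: c.serverURI, Template: c.template}, "", "  ")
}

func (c *config) UnmarshalJSON(data []byte) error {
	// Pre-populate defaults so absent fields keep a sane value.
	cj := configJSON{Template: "%s"}
	if err := json.Unmarshal(data, &cj); err != nil {
		return fmt.Errorf("failed to unmarshal bootstrap config: %v", err)
	}
	c.serverURI = cj.ServerURI
	c.template = cj.Template
	return nil
}

func main() {
	var c config
	_ = c.UnmarshalJSON([]byte(`{"server_uri": "xds.example.com:443"}`))
	out, _ := c.MarshalJSON()
	fmt.Println(string(out)) // template defaulted to "%s", server_uri preserved
}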
-func (c *Config) MarshalJSON() ([]byte, error) { - config := &configJSON{ - XDSServers: c.xDSServers, - CertificateProviders: c.cpcs, - ServerListenerResourceNameTemplate: c.serverListenerResourceNameTemplate, - ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate, - Authorities: c.authorities, - Node: c.node, +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false } - return json.MarshalIndent(config, " ", " ") + for i := range a { + if a[i] != b[i] { + return false + } + } + return true } -// UnmarshalJSON takes the json data (the complete bootstrap configuration) and -// unmarshals it to the struct. -func (c *Config) UnmarshalJSON(data []byte) error { - // Initialize the node field with client controlled values. This ensures - // even if the bootstrap configuration did not contain the node field, we - // will have a node field with client controlled fields alone. - config := configJSON{Node: newNode()} - if err := json.Unmarshal(data, &config); err != nil { - return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err) +// unmarshalJSONServerConfigSlice unmarshals JSON to a slice. +func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { + var servers []*ServerConfig + if err := json.Unmarshal(data, &servers); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err) } - - c.xDSServers = config.XDSServers - c.cpcs = config.CertificateProviders - c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate - c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate - c.authorities = config.Authorities - c.node = config.Node - - // Build the certificate providers configuration to ensure that it is valid. - cpcCfgs := make(map[string]*certprovider.BuildableConfig) - getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) - for instance, nameAndConfig := range c.cpcs { - name := nameAndConfig.PluginName - parser := getBuilder(nameAndConfig.PluginName) - if parser == nil { - // We ignore plugins that we do not know about. - continue - } - bc, err := parser.ParseConfig(nameAndConfig.Config) - if err != nil { - return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) - } - cpcCfgs[instance] = bc + if len(servers) < 1 { + return nil, fmt.Errorf("no management server found in JSON") } - c.certProviderConfigs = cpcCfgs + return servers, nil +} - // Default value of the default client listener name template is "%s". - if c.clientDefaultListenerResourceNameTemplate == "" { - c.clientDefaultListenerResourceNameTemplate = "%s" - } - if len(c.xDSServers) == 0 { - return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data)) +// Authority contains configuration for an Authority for an xDS control plane +// server. See the Authorities field in the Config struct for how it's used. +type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. + // + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". 
If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string + // XDSServer contains the management server and config to connect to for + // this authority. + XDSServer *ServerConfig +} + +// UnmarshalJSON implement json unmarshaller. +func (a *Authority) UnmarshalJSON(data []byte) error { + var jsonData map[string]json.RawMessage + if err := json.Unmarshal(data, &jsonData); err != nil { + return fmt.Errorf("xds: failed to parse authority: %v", err) } - // Post-process the authorities' client listener resource template field: - // - if set, it must start with "xdstp:///" - // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" - for name, authority := range c.authorities { - prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) - if authority.ClientListenerResourceNameTemplate == "" { - authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" - continue - } - if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { - return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + for k, v := range jsonData { + switch k { + case "xds_servers": + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) + } + a.XDSServer = servers[0] + case "client_listener_resource_name_template": + if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } } } return nil } -// GetConfiguration returns the bootstrap configuration initialized by reading -// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents -// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the -// former is preferred. +// Config provides the xDS client with several key bits of information that it +// requires in its interaction with the management server. The Config is +// initialized from the bootstrap file. // -// If none of the env vars are set, this function returns the fallback -// configuration if it is not nil. Else, it returns an error. -// -// This function tries to process as much of the bootstrap file as possible (in -// the presence of the errors) and may return a Config object with certain -// fields left unspecified, in which case the caller should use some sane -// defaults. -func GetConfiguration() (*Config, error) { +// Users must use one of the NewConfigXxx() functions to create a Config +// instance, and not initialize it manually. +type Config struct { + // XDSServer is the management server to connect to. + // + // The bootstrap file contains a list of servers (with name+creds), but we + // pick the first one. + XDSServer *ServerConfig + // CertProviderConfigs contains a mapping from certificate provider plugin + // instance names to parsed buildable configs. + CertProviderConfigs map[string]*certprovider.BuildableConfig + // ServerListenerResourceNameTemplate is a template for the name of the + // Listener resource to subscribe to for a gRPC server. 
+ // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. + // + // The token "%s", if present in this string, will be replaced with the IP + // and port on which the server is listening. (e.g., "0.0.0.0:8080", + // "[::]:8080"). For example, a value of "example/resource/%s" could become + // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", + // the replaced string will be %-encoded. + // + // There is no default; if unset, xDS-based server creation fails. + ServerListenerResourceNameTemplate string + // A template for the name of the Listener resource to subscribe to + // for a gRPC client channel. Used only when the channel is created + // with an "xds:" URI with no authority. + // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. + // + // The token "%s", if present in this string, will be replaced with + // the service authority (i.e., the path part of the target URI + // used to create the gRPC channel). If the template starts with + // "xdstp:", the replaced string will be %-encoded. + // + // Defaults to "%s". + ClientDefaultListenerResourceNameTemplate string + // Authorities is a map of authority name to corresponding configuration. + // + // This is used in the following cases: + // - A gRPC client channel is created using an "xds:" URI that includes + // an authority. + // - A gRPC client channel is created using an "xds:" URI with no + // authority, but the "client_default_listener_resource_name_template" + // field above turns it into an "xdstp:" URI. + // - A gRPC server is created and the + // "server_listener_resource_name_template" field is an "xdstp:" URI. + // + // In any of those cases, it is an error if the specified authority is + // not present in this map. + Authorities map[string]*Authority + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +type channelCreds struct { + Type string `json:"type"` + Config json.RawMessage `json:"config,omitempty"` +} + +type xdsServer struct { + ServerURI string `json:"server_uri"` + ChannelCreds []channelCreds `json:"channel_creds"` + ServerFeatures []string `json:"server_features"` +} + +func bootstrapConfigFromEnvVariable() ([]byte, error) { fName := envconfig.XDSBootstrapFileName fContent := envconfig.XDSBootstrapFileContent + // Bootstrap file name has higher priority than bootstrap content. if fName != "" { - if logger.V(2) { - logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName) - } - cfg, err := bootstrapFileReadFunc(fName) - if err != nil { - return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err) - } - return newConfigFromContents(cfg) + // If file name is set + // - If file not found (or other errors), fail + // - Otherwise, use the content. + // + // Note that even if the content is invalid, we don't failover to the + // file content env variable. 
+ logger.Debugf("Using bootstrap file with name %q", fName) + return bootstrapFileReadFunc(fName) } if fContent != "" { - if logger.V(2) { - logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable") - } - return newConfigFromContents([]byte(fContent)) - } - - if cfg := fallbackBootstrapConfig(); cfg != nil { - if logger.V(2) { - logger.Infof("Using bootstrap contents from fallback config") - } - return cfg, nil + return []byte(fContent), nil } - return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) + return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", + envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) } -func newConfigFromContents(data []byte) (*Config, error) { - // Normalize the input configuration. - buf := bytes.Buffer{} - err := json.Indent(&buf, data, "", "") - if err != nil { - return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err) - } - data = bytes.TrimSpace(buf.Bytes()) - - config := &Config{} - if err := config.UnmarshalJSON(data); err != nil { - return nil, err - } - return config, nil -} - -// ConfigOptionsForTesting specifies options for creating a new bootstrap -// configuration for testing purposes. +// NewConfig returns a new instance of Config initialized by reading the +// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified +// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the former is +// preferred. // -// # Testing-Only -type ConfigOptionsForTesting struct { - // Servers is the top-level xDS server configuration - Servers []json.RawMessage - // CertificateProviders is the certificate providers configuration. - CertificateProviders map[string]json.RawMessage - // ServerListenerResourceNameTemplate is the listener resource name template - // to be used on the gRPC server. - ServerListenerResourceNameTemplate string - // ClientDefaultListenerResourceNameTemplate is the default listener - // resource name template to be used on the gRPC client. - ClientDefaultListenerResourceNameTemplate string - // Authorities is a list of non-default authorities. - Authorities map[string]json.RawMessage - // Node identifies the gRPC client/server node in the - // proxyless service mesh. - Node json.RawMessage -} - -// NewContentsForTesting creates a new bootstrap configuration from the passed in -// options, for testing purposes. +// We support a credential registration mechanism and only credentials +// registered through that mechanism will be accepted here. See package +// `xds/bootstrap` for details. 
// -// # Testing-Only -func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) { - var servers []*ServerConfig - for _, serverCfgJSON := range opts.Servers { - server := &ServerConfig{} - if err := server.UnmarshalJSON(serverCfgJSON); err != nil { - return nil, err - } - servers = append(servers, server) - } - certProviders := make(map[string]certproviderNameAndConfig) - for k, v := range opts.CertificateProviders { - cp := certproviderNameAndConfig{} - if err := json.Unmarshal(v, &cp); err != nil { - return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v)) - } - certProviders[k] = cp - } - authorities := make(map[string]*Authority) - for k, v := range opts.Authorities { - a := &Authority{} - if err := json.Unmarshal(v, a); err != nil { - return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v)) - } - authorities[k] = a - } - node := newNode() - if err := json.Unmarshal(opts.Node, &node); err != nil { - return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err) - } - cfgJSON := configJSON{ - XDSServers: servers, - CertificateProviders: certProviders, - ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, - ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, - Authorities: authorities, - Node: node, - } - contents, err := json.MarshalIndent(cfgJSON, " ", " ") +// This function tries to process as much of the bootstrap file as possible (in +// the presence of the errors) and may return a Config object with certain +// fields left unspecified, in which case the caller should use some sane +// defaults. +func NewConfig() (*Config, error) { + // Examples of the bootstrap json can be found in the generator tests + // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go. + data, err := bootstrapConfigFromEnvVariable() if err != nil { - return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err) + return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } - return contents, nil -} - -// NewConfigForTesting creates a new bootstrap configuration from the provided -// contents, for testing purposes. -// -// # Testing-Only -func NewConfigForTesting(contents []byte) (*Config, error) { - return newConfigFromContents(contents) -} - -// certproviderNameAndConfig is the internal representation of -// the`certificate_providers` field in the bootstrap configuration. -type certproviderNameAndConfig struct { - PluginName string `json:"plugin_name"` - Config json.RawMessage `json:"config"` -} - -// locality is the internal representation of the locality field within node. -type locality struct { - Region string `json:"region,omitempty"` - Zone string `json:"zone,omitempty"` - SubZone string `json:"sub_zone,omitempty"` -} - -func (l locality) Equal(other locality) bool { - return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone + return newConfigFromContents(data) } -func (l locality) isEmpty() bool { - return l.Equal(locality{}) +// NewConfigFromContents returns a new Config using the specified +// bootstrap file contents instead of reading the environment variable. 
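For context, a minimal bootstrap document of the shape this parser accepts could look like the sketch below. The values are placeholders only; the required pieces, per the validation further down, are a non-empty "xds_servers" list whose first entry has a "server_uri" and a "channel_creds" type that resolves to a usable credential (built-in types, or one registered via the xds/bootstrap registry):

// Illustrative bootstrap contents; not taken from the diff.
const exampleBootstrap = `{
  "xds_servers": [
    {
      "server_uri": "xds-server.example.com:443",
      "channel_creds": [{ "type": "insecure" }]
    }
  ],
  "node": { "id": "example-node-id" }
}`

// A caller inside grpc-go could then do, roughly:
// cfg, err := bootstrap.NewConfigFromContents([]byte(exampleBootstrap))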
+func NewConfigFromContents(data []byte) (*Config, error) { + return newConfigFromContents(data) } -type userAgentVersion struct { - UserAgentVersion string `json:"user_agent_version,omitempty"` -} - -// node is the internal representation of the node field in the bootstrap -// configuration. -type node struct { - ID string `json:"id,omitempty"` - Cluster string `json:"cluster,omitempty"` - Locality locality `json:"locality,omitempty"` - Metadata *structpb.Struct `json:"metadata,omitempty"` - - // The following fields are controlled by the client implementation and - // should not unmarshaled from JSON. - userAgentName string - userAgentVersionType userAgentVersion - clientFeatures []string -} +func newConfigFromContents(data []byte) (*Config, error) { + config := &Config{} -// newNode is a convenience function to create a new node instance with fields -// controlled by the client implementation set to the desired values. -func newNode() node { - return node{ - userAgentName: gRPCUserAgentName, - userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version}, - clientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, + var jsonData map[string]json.RawMessage + if err := json.Unmarshal(data, &jsonData); err != nil { + return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err) } -} -func (n node) Equal(other node) bool { - switch { - case n.ID != other.ID: - return false - case n.Cluster != other.Cluster: - return false - case !n.Locality.Equal(other.Locality): - return false - case n.userAgentName != other.userAgentName: - return false - case n.userAgentVersionType != other.userAgentVersionType: - return false + var node *v3corepb.Node + opts := protojson.UnmarshalOptions{DiscardUnknown: true} + for k, v := range jsonData { + switch k { + case "node": + node = &v3corepb.Node{} + if err := opts.Unmarshal(v, node); err != nil { + return nil, fmt.Errorf("xds: protojson.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "xds_servers": + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) + } + config.XDSServer = servers[0] + case "certificate_providers": + var providerInstances map[string]json.RawMessage + if err := json.Unmarshal(v, &providerInstances); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + configs := make(map[string]*certprovider.BuildableConfig) + getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) + for instance, data := range providerInstances { + var nameAndConfig struct { + PluginName string `json:"plugin_name"` + Config json.RawMessage `json:"config"` + } + if err := json.Unmarshal(data, &nameAndConfig); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err) + } + + name := nameAndConfig.PluginName + parser := getBuilder(nameAndConfig.PluginName) + if parser == nil { + // We ignore plugins that we do not know about. 
+ continue + } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err) + } + configs[instance] = bc + } + config.CertProviderConfigs = configs + case "server_listener_resource_name_template": + if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "client_default_listener_resource_name_template": + if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "authorities": + if err := json.Unmarshal(v, &config.Authorities); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + default: + logger.Warningf("Bootstrap content has unknown field: %s", k) + } + // Do not fail the xDS bootstrap when an unknown field is seen. This can + // happen when an older version client reads a newer version bootstrap + // file with new fields. } - // Consider failures in JSON marshaling as being unable to perform the - // comparison, and hence return false. - nMetadata, err := n.Metadata.MarshalJSON() - if err != nil { - return false + if config.ClientDefaultListenerResourceNameTemplate == "" { + // Default value of the default client listener name template is "%s". + config.ClientDefaultListenerResourceNameTemplate = "%s" } - otherMetadata, err := other.Metadata.MarshalJSON() - if err != nil { - return false + if config.XDSServer == nil { + return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"]) } - if !bytes.Equal(nMetadata, otherMetadata) { - return false + if config.XDSServer.ServerURI == "" { + return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) } - - return slices.Equal(n.clientFeatures, other.clientFeatures) -} - -func (n node) toProto() *v3corepb.Node { - return &v3corepb.Node{ - Id: n.ID, - Cluster: n.Cluster, - Locality: func() *v3corepb.Locality { - if n.Locality.isEmpty() { - return nil - } - return &v3corepb.Locality{ - Region: n.Locality.Region, - Zone: n.Locality.Zone, - SubZone: n.Locality.SubZone, - } - }(), - Metadata: proto.Clone(n.Metadata).(*structpb.Struct), - UserAgentName: n.userAgentName, - UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion}, - ClientFeatures: slices.Clone(n.clientFeatures), + if config.XDSServer.CredsDialOption() == nil { + return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) } -} - -// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be -// used when the bootstrap environment variables are unset. -// -// The provided configuration must be valid JSON. Returns a non-nil error if -// parsing the provided configuration fails. 
-func SetFallbackBootstrapConfig(cfgJSON []byte) error { - config, err := newConfigFromContents(cfgJSON) - if err != nil { - return err + // Post-process the authorities' client listener resource template field: + // - if set, it must start with "xdstp:///" + // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" + for name, authority := range config.Authorities { + prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) + if authority.ClientListenerResourceNameTemplate == "" { + authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" + continue + } + if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { + return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + } } - configMu.Lock() - defer configMu.Unlock() - fallbackBootstrapCfg = config - return nil -} - -// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap -// configuration to be used when the bootstrap environment variables are unset. -// -// # Testing-Only -func UnsetFallbackBootstrapConfigForTesting() { - configMu.Lock() - defer configMu.Unlock() - fallbackBootstrapCfg = nil -} + // Performing post-production on the node information. Some additional fields + // which are not expected to be set in the bootstrap file are populated here. + if node == nil { + node = &v3corepb.Node{} + } + node.UserAgentName = gRPCUserAgentName + node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} + node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) + config.NodeProto = node -// fallbackBootstrapConfig returns the fallback bootstrap configuration -// that will be used by the xDS client when the bootstrap environment -// variables are unset. -func fallbackBootstrapConfig() *Config { - configMu.Lock() - defer configMu.Unlock() - return fallbackBootstrapCfg + if logger.V(2) { + logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) + } + return config, nil } - -var ( - configMu sync.Mutex - fallbackBootstrapCfg *Config -) diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go deleted file mode 100644 index c37c58c0233e..000000000000 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ /dev/null @@ -1,194 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package mem - -import ( - "sort" - "sync" - - "google.golang.org/grpc/internal" -) - -// BufferPool is a pool of buffers that can be shared and reused, resulting in -// decreased memory allocation. -type BufferPool interface { - // Get returns a buffer with specified length from the pool. - Get(length int) *[]byte - - // Put returns a buffer to the pool. 
- Put(*[]byte) -} - -var defaultBufferPoolSizes = []int{ - 256, - 4 << 10, // 4KB (go page size) - 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) - 32 << 10, // 32KB (default buffer size for io.Copy) - 1 << 20, // 1MB -} - -var defaultBufferPool BufferPool - -func init() { - defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) - - internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { - defaultBufferPool = pool - } - - internal.SetBufferPoolingThresholdForTesting = func(threshold int) { - bufferPoolingThreshold = threshold - } -} - -// DefaultBufferPool returns the current default buffer pool. It is a BufferPool -// created with NewBufferPool that uses a set of default sizes optimized for -// expected workflows. -func DefaultBufferPool() BufferPool { - return defaultBufferPool -} - -// NewTieredBufferPool returns a BufferPool implementation that uses multiple -// underlying pools of the given pool sizes. -func NewTieredBufferPool(poolSizes ...int) BufferPool { - sort.Ints(poolSizes) - pools := make([]*sizedBufferPool, len(poolSizes)) - for i, s := range poolSizes { - pools[i] = newSizedBufferPool(s) - } - return &tieredBufferPool{ - sizedPools: pools, - } -} - -// tieredBufferPool implements the BufferPool interface with multiple tiers of -// buffer pools for different sizes of buffers. -type tieredBufferPool struct { - sizedPools []*sizedBufferPool - fallbackPool simpleBufferPool -} - -func (p *tieredBufferPool) Get(size int) *[]byte { - return p.getPool(size).Get(size) -} - -func (p *tieredBufferPool) Put(buf *[]byte) { - p.getPool(cap(*buf)).Put(buf) -} - -func (p *tieredBufferPool) getPool(size int) BufferPool { - poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { - return p.sizedPools[i].defaultSize >= size - }) - - if poolIdx == len(p.sizedPools) { - return &p.fallbackPool - } - - return p.sizedPools[poolIdx] -} - -// sizedBufferPool is a BufferPool implementation that is optimized for specific -// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size -// of 16kb and a sizedBufferPool can be configured to only return buffers with a -// capacity of 16kb. Note that however it does not support returning larger -// buffers and in fact panics if such a buffer is requested. Because of this, -// this BufferPool implementation is not meant to be used on its own and rather -// is intended to be embedded in a tieredBufferPool such that Get is only -// invoked when the required size is smaller than or equal to defaultSize. -type sizedBufferPool struct { - pool sync.Pool - defaultSize int -} - -func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) - b := *buf - clear(b[:cap(b)]) - *buf = b[:size] - return buf -} - -func (p *sizedBufferPool) Put(buf *[]byte) { - if cap(*buf) < p.defaultSize { - // Ignore buffers that are too small to fit in the pool. Otherwise, when - // Get is called it will panic as it tries to index outside the bounds - // of the buffer. - return - } - p.pool.Put(buf) -} - -func newSizedBufferPool(size int) *sizedBufferPool { - return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, - defaultSize: size, - } -} - -var _ BufferPool = (*simpleBufferPool)(nil) - -// simpleBufferPool is an implementation of the BufferPool interface that -// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to -// acquire a buffer from the pool but if that buffer is too small, it returns it -// to the pool and creates a new one. -type simpleBufferPool struct { - pool sync.Pool -} - -func (p *simpleBufferPool) Get(size int) *[]byte { - bs, ok := p.pool.Get().(*[]byte) - if ok && cap(*bs) >= size { - *bs = (*bs)[:size] - return bs - } - - // A buffer was pulled from the pool, but it is too small. Put it back in - // the pool and create one large enough. - if ok { - p.pool.Put(bs) - } - - b := make([]byte, size) - return &b -} - -func (p *simpleBufferPool) Put(buf *[]byte) { - p.pool.Put(buf) -} - -var _ BufferPool = NopBufferPool{} - -// NopBufferPool is a buffer pool that returns new buffers without pooling. -type NopBufferPool struct{} - -// Get returns a buffer with specified length from the pool. -func (NopBufferPool) Get(length int) *[]byte { - b := make([]byte, length) - return &b -} - -// Put returns a buffer to the pool. -func (NopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go deleted file mode 100644 index d7775cea623d..000000000000 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package mem - -import ( - "compress/flate" - "io" -) - -// BufferSlice offers a means to represent data that spans one or more Buffer -// instances. A BufferSlice is meant to be immutable after creation, and methods -// like Ref create and return copies of the slice. This is why all methods have -// value receivers rather than pointer receivers. -// -// Note that any of the methods that read the underlying buffers such as Ref, -// Len or CopyTo etc., will panic if any underlying buffers have already been -// freed. It is recommended to not directly interact with any of the underlying -// buffers directly, rather such interactions should be mediated through the -// various methods on this type. -// -// By convention, any APIs that return (mem.BufferSlice, error) should reduce -// the burden on the caller by never returning a mem.BufferSlice that needs to -// be freed if the error is non-nil, unless explicitly stated. -type BufferSlice []Buffer - -// Len returns the sum of the length of all the Buffers in this slice. -// -// # Warning -// -// Invoking the built-in len on a BufferSlice will return the number of buffers -// in the slice, and *not* the value returned by this function. -func (s BufferSlice) Len() int { - var length int - for _, b := range s { - length += b.Len() - } - return length -} - -// Ref invokes Ref on each buffer in the slice. -func (s BufferSlice) Ref() { - for _, b := range s { - b.Ref() - } -} - -// Free invokes Buffer.Free() on each Buffer in the slice. 
-func (s BufferSlice) Free() { - for _, b := range s { - b.Free() - } -} - -// CopyTo copies each of the underlying Buffer's data into the given buffer, -// returning the number of bytes copied. Has the same semantics as the copy -// builtin in that it will copy as many bytes as it can, stopping when either dst -// is full or s runs out of data, returning the minimum of s.Len() and len(dst). -func (s BufferSlice) CopyTo(dst []byte) int { - off := 0 - for _, b := range s { - off += copy(dst[off:], b.ReadOnlyData()) - } - return off -} - -// Materialize concatenates all the underlying Buffer's data into a single -// contiguous buffer using CopyTo. -func (s BufferSlice) Materialize() []byte { - l := s.Len() - if l == 0 { - return nil - } - out := make([]byte, l) - s.CopyTo(out) - return out -} - -// MaterializeToBuffer functions like Materialize except that it writes the data -// to a single Buffer pulled from the given BufferPool. As a special case, if the -// input BufferSlice only actually has one Buffer, this function has nothing to -// do and simply returns said Buffer. -func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { - if len(s) == 1 { - s[0].Ref() - return s[0] - } - sLen := s.Len() - if sLen == 0 { - return emptyBuffer{} - } - buf := pool.Get(sLen) - s.CopyTo(*buf) - return NewBuffer(buf, pool) -} - -// Reader returns a new Reader for the input slice after taking references to -// each underlying buffer. -func (s BufferSlice) Reader() Reader { - s.Ref() - return &sliceReader{ - data: s, - len: s.Len(), - } -} - -// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. -// Buffers will be freed as they are read. -type Reader interface { - flate.Reader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. - Remaining() int -} - -type sliceReader struct { - data BufferSlice - len int - // The index into data[0].ReadOnlyData(). - bufferIdx int -} - -func (r *sliceReader) Remaining() int { - return r.len -} - -func (r *sliceReader) Close() error { - r.data.Free() - r.data = nil - r.len = 0 - return nil -} - -func (r *sliceReader) freeFirstBufferIfEmpty() bool { - if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { - return false - } - - r.data[0].Free() - r.data = r.data[1:] - r.bufferIdx = 0 - return true -} - -func (r *sliceReader) Read(buf []byte) (n int, _ error) { - if r.len == 0 { - return 0, io.EOF - } - - for len(buf) != 0 && r.len != 0 { - // Copy as much as possible from the first Buffer in the slice into the - // given byte slice. - data := r.data[0].ReadOnlyData() - copied := copy(buf, data[r.bufferIdx:]) - r.len -= copied // Reduce len by the number of bytes copied. - r.bufferIdx += copied // Increment the buffer index. - n += copied // Increment the total number of bytes read. - buf = buf[copied:] // Shrink the given byte slice. - - // If we have copied all the data from the first Buffer, free it and advance to - // the next in the slice. 
- r.freeFirstBufferIfEmpty() - } - - return n, nil -} - -func (r *sliceReader) ReadByte() (byte, error) { - if r.len == 0 { - return 0, io.EOF - } - - // There may be any number of empty buffers in the slice, clear them all until a - // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. - for r.freeFirstBufferIfEmpty() { - } - - b := r.data[0].ReadOnlyData()[r.bufferIdx] - r.len-- - r.bufferIdx++ - // Free the first buffer in the slice if the last byte was read - r.freeFirstBufferIfEmpty() - return b, nil -} - -var _ io.Writer = (*writer)(nil) - -type writer struct { - buffers *BufferSlice - pool BufferPool -} - -func (w *writer) Write(p []byte) (n int, err error) { - b := Copy(p, w.pool) - *w.buffers = append(*w.buffers, b) - return b.Len(), nil -} - -// NewWriter wraps the given BufferSlice and BufferPool to implement the -// io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. -func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { - return &writer{buffers: buffers, pool: pool} -} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go deleted file mode 100644 index 975ceb71853d..000000000000 --- a/vendor/google.golang.org/grpc/mem/buffers.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package mem provides utilities that facilitate memory reuse in byte slices -// that are used as buffers. -// -// # Experimental -// -// Notice: All APIs in this package are EXPERIMENTAL and may be changed or -// removed in a later release. -package mem - -import ( - "fmt" - "sync" - "sync/atomic" -) - -// A Buffer represents a reference counted piece of data (in bytes) that can be -// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be -// released by calling Free(), which invokes the free function given at creation -// only after all references are released. -// -// Note that a Buffer is not safe for concurrent access and instead each -// goroutine should use its own reference to the data, which can be acquired via -// a call to Ref(). -// -// Attempts to access the underlying data after releasing the reference to the -// Buffer will panic. -type Buffer interface { - // ReadOnlyData returns the underlying byte slice. Note that it is undefined - // behavior to modify the contents of this slice in any way. - ReadOnlyData() []byte - // Ref increases the reference counter for this Buffer. - Ref() - // Free decrements this Buffer's reference counter and frees the underlying - // byte slice if the counter reaches 0 as a result of this call. - Free() - // Len returns the Buffer's size. 
- Len() int - - split(n int) (left, right Buffer) - read(buf []byte) (int, Buffer) -} - -var ( - bufferPoolingThreshold = 1 << 10 - - bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} - refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} -) - -func IsBelowBufferPoolingThreshold(size int) bool { - return size <= bufferPoolingThreshold -} - -type buffer struct { - origData *[]byte - data []byte - refs *atomic.Int32 - pool BufferPool -} - -func newBuffer() *buffer { - return bufferObjectPool.Get().(*buffer) -} - -// NewBuffer creates a new Buffer from the given data, initializing the reference -// counter to 1. The data will then be returned to the given pool when all -// references to the returned Buffer are released. As a special case to avoid -// additional allocations, if the given buffer pool is nil, the returned buffer -// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the -// underlying data is never freed. -// -// Note that the backing array of the given data is not copied. -func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { - return (SliceBuffer)(*data) - } - b := newBuffer() - b.origData = data - b.data = *data - b.pool = pool - b.refs = refObjectPool.Get().(*atomic.Int32) - b.refs.Add(1) - return b -} - -// Copy creates a new Buffer from the given data, initializing the reference -// counter to 1. -// -// It acquires a []byte from the given pool and copies over the backing array -// of the given data. The []byte acquired from the pool is returned to the -// pool when all references to the returned Buffer are released. -func Copy(data []byte, pool BufferPool) Buffer { - if IsBelowBufferPoolingThreshold(len(data)) { - buf := make(SliceBuffer, len(data)) - copy(buf, data) - return buf - } - - buf := pool.Get(len(data)) - copy(*buf, data) - return NewBuffer(buf, pool) -} - -func (b *buffer) ReadOnlyData() []byte { - if b.refs == nil { - panic("Cannot read freed buffer") - } - return b.data -} - -func (b *buffer) Ref() { - if b.refs == nil { - panic("Cannot ref freed buffer") - } - b.refs.Add(1) -} - -func (b *buffer) Free() { - if b.refs == nil { - panic("Cannot free freed buffer") - } - - refs := b.refs.Add(-1) - switch { - case refs > 0: - return - case refs == 0: - if b.pool != nil { - b.pool.Put(b.origData) - } - - refObjectPool.Put(b.refs) - b.origData = nil - b.data = nil - b.refs = nil - b.pool = nil - bufferObjectPool.Put(b) - default: - panic("Cannot free freed buffer") - } -} - -func (b *buffer) Len() int { - return len(b.ReadOnlyData()) -} - -func (b *buffer) split(n int) (Buffer, Buffer) { - if b.refs == nil { - panic("Cannot split freed buffer") - } - - b.refs.Add(1) - split := newBuffer() - split.origData = b.origData - split.data = b.data[n:] - split.refs = b.refs - split.pool = b.pool - - b.data = b.data[:n] - - return b, split -} - -func (b *buffer) read(buf []byte) (int, Buffer) { - if b.refs == nil { - panic("Cannot read freed buffer") - } - - n := copy(buf, b.data) - if n == len(b.data) { - b.Free() - return n, nil - } - - b.data = b.data[n:] - return n, b -} - -// String returns a string representation of the buffer. May be used for -// debugging purposes. 
-func (b *buffer) String() string { - return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) -} - -func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { - return buf.read(dst) -} - -// SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). -func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { - return buf.split(n) -} - -type emptyBuffer struct{} - -func (e emptyBuffer) ReadOnlyData() []byte { - return nil -} - -func (e emptyBuffer) Ref() {} -func (e emptyBuffer) Free() {} - -func (e emptyBuffer) Len() int { - return 0 -} - -func (e emptyBuffer) split(n int) (left, right Buffer) { - return e, e -} - -func (e emptyBuffer) read(buf []byte) (int, Buffer) { - return 0, e -} - -type SliceBuffer []byte - -func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } - -func (s SliceBuffer) split(n int) (left, right Buffer) { - return s[:n], s[n:] -} - -func (s SliceBuffer) read(buf []byte) (int, Buffer) { - n := copy(buf, s) - if n == len(s) { - return n, nil - } - return n, s[n:] -} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index d2e15253bbfb..1e9485fd6e26 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,6 +213,11 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -223,7 +228,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insensitive comparison: MD is a map, and there's no guarantee + // Case insenitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go index bb664d6a0814..67d1fa9d7f2b 100644 --- a/vendor/google.golang.org/grpc/orca/server_metrics.go +++ b/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -108,7 +108,7 @@ type ServerMetricsRecorder interface { // SetMemoryUtilization sets the memory utilization server metric. Must be // in the range [0, 1]. SetMemoryUtilization(float64) - // DeleteMemoryUtilization deletes the memory utilization server metric to + // DeleteMemoryUtilization deletes the memory utiliztion server metric to // prevent it from being sent. 
DeleteMemoryUtilization() diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index e87a17f36a50..73bd63364335 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,7 +20,6 @@ package grpc import ( "google.golang.org/grpc/codes" - "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -32,10 +31,9 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData mem.BufferSlice + encodedData []byte hdr []byte - payload mem.BufferSlice - pf payloadFormat + payload []byte } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -59,27 +57,11 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - - materializedData := data.Materialize() - data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} - - // TODO: it should be possible to grab the bufferPool from the underlying - // stream implementation with a type cast to its actual type (such as - // addrConnStream) and accessing the buffer pool directly. - var compData mem.BufferSlice - compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) if err != nil { return err } - - if p.pf.isCompressed() { - materializedCompData := compData.Materialize() - compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} - } - - p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) - + p.hdr, p.payload = msgHeader(data, compData) return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 000000000000..3edca296c224 --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) 
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 23bb3fb25824..c5fb45236faf 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.TrySchedule(func(ctx context.Context) { + ccr.serializer.Schedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.TrySchedule(func(ctx context.Context) { + ccr.serializer.Schedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.TrySchedule(func(context.Context) { + ccr.serializer.Schedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,9 +177,6 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - if !logger.V(0) && !channelz.IsOn() { - return - } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index db8865ec3fd3..fdd49e6e9151 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,6 +19,7 @@ package grpc import ( + "bytes" "compress/gzip" "context" "encoding/binary" @@ -34,7 +35,6 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -271,13 +271,17 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } } -// WaitForReady configures the RPC's behavior when the client is in -// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If -// waitForReady is false, the RPC will fail immediately. Otherwise, the client -// will wait until a connection becomes available or the RPC's deadline is -// reached. +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. 
gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. // -// By default, RPCs do not "wait for ready". +// By default, RPCs don't "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -511,51 +515,11 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = newCodecV1Bridge(o.Codec) + c.codec = o.Codec return nil } func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodecV2 returns a CallOption that will set codec to be used for all -// request and response messages for a call. The result of calling Name() will -// be used as the content-subtype after converting to lowercase, unless -// CallContentSubtype is also used. -// -// See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. Also see the documentation on RegisterCodec and -// CallContentSubtype for more details on the interaction between Codec and -// content-subtype. -// -// This function is provided for advanced users; prefer to use only -// CallContentSubtype to select a registered codec instead. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ForceCodecV2(codec encoding.CodecV2) CallOption { - return ForceCodecV2CallOption{CodecV2: codec} -} - -// ForceCodecV2CallOption is a CallOption that indicates the codec used for -// marshaling messages. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ForceCodecV2CallOption struct { - CodecV2 encoding.CodecV2 -} - -func (o ForceCodecV2CallOption) before(c *callInfo) error { - c.codec = o.CodecV2 - return nil -} - -func (o ForceCodecV2CallOption) after(c *callInfo, attempt *csAttempt) {} - // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. // @@ -576,7 +540,7 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = newCodecV0Bridge(o.Codec) + c.codec = o.Codec return nil } func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} @@ -617,28 +581,19 @@ const ( compressionMade payloadFormat = 1 // compressed ) -func (pf payloadFormat) isCompressed() bool { - return pf == compressionMade -} - -type streamReader interface { - ReadHeader(header []byte) error - Read(n int) (mem.BufferSlice, error) -} - // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r streamReader + r io.Reader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // bufferPool is the pool of shared receive buffers. - bufferPool mem.BufferPool + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -653,15 +608,14 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying streamReader must not return an incompatible +// that the underlying io.Reader must not return an incompatible // error. 
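For reference, the five-byte header that recvMsg reads is the standard gRPC message framing: one compressed-flag byte followed by a big-endian uint32 payload length. A minimal decoding sketch, assuming only the encoding/binary package (the helper name is illustrative and not part of this diff):

func parseMessagePrefix(header [5]byte) (compressed bool, length uint32) {
	// header[0] is the payload format: 0 = uncompressed, 1 = compressed.
	compressed = header[0] == 1
	// header[1:5] carries the payload length in big-endian byte order.
	length = binary.BigEndian.Uint32(header[1:])
	return compressed, length
}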
-func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) - if err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } - pf := payloadFormat(p.header[0]) + pf = payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -673,21 +627,20 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - - data, err := p.r.Read(int(length)) - if err != nil { + msg = p.recvBufferPool.Get(int(length)) + if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, data, nil + return pf, msg, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) (mem.BufferSlice, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -695,8 +648,7 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(b.Len()) > math.MaxUint32 { - b.Free() + if uint(len(b)) > math.MaxUint32 { return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -707,41 +659,34 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { - if (compressor == nil && cp == nil) || in.Len() == 0 { - return nil, compressionNone, nil +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + if len(in) == 0 { + return nil, nil } - var out mem.BufferSlice - w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { - out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } + cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(w) + z, err := compressor.Compress(cbuf) if err != nil { - return nil, 0, wrapErr(err) + return nil, wrapErr(err) } - for _, b := range in { - if _, err := z.Write(b.ReadOnlyData()); err != nil { - return nil, 0, wrapErr(err) - } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) } if err := z.Close(); err != nil { - return nil, 0, wrapErr(err) + return nil, wrapErr(err) } } else { - // This is obviously really inefficient since it fully materializes the data, but - // there is no way around this with the old Compressor API. At least it attempts - // to return the buffer to the provider, in the hopes it can be reused (maybe - // even by a subsequent call to this very function). 
- buf := in.MaterializeToBuffer(pool) - defer buf.Free() - if err := cp.Do(w, buf.ReadOnlyData()); err != nil { - return nil, 0, wrapErr(err) + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) } } - return out, compressionMade, nil + return cbuf.Bytes(), nil } const ( @@ -752,36 +697,33 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. -func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { hdr = make([]byte, headerLen) - hdr[0] = byte(pf) - - var length uint32 - if pf.isCompressed() { - length = uint32(compData.Len()) - payload = compData + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData } else { - length = uint32(data.Len()) - payload = data + hdr[0] = byte(compressionNone) } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], length) - return hdr, payload + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data } -func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Length: dataLength, - WireLength: payloadLength + headerLen, - CompressedLength: payloadLength, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -789,11 +731,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - if isServer { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -803,129 +741,104 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes mem.BufferSlice -} - -func (p *payloadInfo) free() { - if p != nil && p.uncompressedBytes != nil { - p.uncompressedBytes.Free() - } + uncompressedBytes []byte } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -// TODO: Refactor this function to reduce the number of arguments. 
-// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, -) (out mem.BufferSlice, err error) { - pf, compressed, err := p.recvMsg(maxReceiveMessageSize) +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err + return nil, nil, err } - compressedLength := compressed.Len() - - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { - compressed.Free() - return nil, st.Err() + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, nil, st.Err() } var size int - if pf.isCompressed() { - defer compressed.Free() - + if pf == compressionMade { // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - var uncompressedBuf []byte - uncompressedBuf, err = dc.Do(compressed.Reader()) - if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} - } + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) size = len(uncompressedBuf) } else { - out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { - out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - out = compressed + uncompressedBuf = compressedBuf } if payInfo != nil { - payInfo.compressedLength = compressedLength - out.Ref() - payInfo.uncompressedBytes = out + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) + } } - return out, nil + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { - dcReader, err := compressor.Decompress(d.Reader()) +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) if err != nil { return nil, 0, err } - - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? 
- //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. - // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - out.Free() - return nil, 0, err + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } } - return out, out.Len(), nil + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { - data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() - // If the codec wants its own reference to the data, it can get it. Otherwise, always - // free the buffers. - defer data.Free() - - if err := c.Unmarshal(data, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - return nil } @@ -1028,7 +941,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. 
- if ec, ok := c.codec.(encoding.CodecV2); ok { + if ec, ok := c.codec.(encoding.Codec); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -1037,12 +950,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. - c.codec = getCodec(proto.Name) + c.codec = encoding.GetCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = getCodec(c.contentSubtype) + c.codec = encoding.GetCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 457d27338f79..89f8e4792bf1 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,7 +45,6 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -81,7 +80,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.BufferPool = bufferPool + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -171,7 +170,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - bufferPool mem.BufferPool + recvBufferPool SharedBufferPool waitForHandlers bool } @@ -182,7 +181,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - bufferPool: mem.DefaultBufferPool(), + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -314,7 +313,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = newCodecV0Bridge(codec) + o.codec = codec }) } @@ -343,22 +342,7 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = newCodecV1Bridge(codec) - }) -} - -// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new -// CodecV2 interface. -// -// Will be supported throughout 1.x. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { - return newFuncServerOption(func(o *serverOptions) { - o.codec = codecV2 + o.codec = codec }) } @@ -608,9 +592,26 @@ func WaitForHandlers(w bool) ServerOption { }) } -func bufferPool(bufferPool mem.BufferPool) ServerOption { +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. 
In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.bufferPool = bufferPool + o.recvBufferPool = bufferPool }) } @@ -621,7 +622,7 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for +// serverWorkers blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -979,7 +980,6 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, - BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,35 +1142,20 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - - compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) + compData, err := compress(data, cp, comp) if err != nil { - data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - - hdr, payload := msgHeader(data, compData, pf) - - defer func() { - compData.Free() - data.Free() - // payload does not need to be freed here, it is either data or compData, both of - // which are already freed. - }() - - dataLen := data.Len() - payloadLen := payload.Len() + hdr, payload := msgHeader(data, compData) // TODO(dfawley): should we be checking len(data) instead? - if payloadLen > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1349,10 +1334,9 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} - defer payInfo.free() } - d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1363,22 +1347,24 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: d.Len(), + Length: len(d), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, + Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d.Materialize(), + Message: d, } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1562,7 +1548,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1977,12 +1963,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return getCodec(proto.Name) + return encoding.GetCodec(proto.Name) } - codec := getCodec(contentSubtype) + codec := encoding.GetCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return getCodec(proto.Name) + return encoding.GetCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 000000000000..48a64cfe8e25 --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. 
+type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 71195c4943d7..fdb0bd65182c 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,6 +77,9 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any + // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. + Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -147,6 +150,9 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any + // Data is the serialized message payload. + // Deprecated: Data will be removed in the next release. + Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb2b2a216ce2..8051ef5b514a 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,7 +41,6 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -360,7 +359,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { return nil, err } @@ -518,7 +517,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -567,15 +566,10 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - replayBuffer []replayOp // operations to replay on retry - replayBufferSize int // current size of replayBuffer -} - -type replayOp struct { - op func(a *csAttempt) error - cleanup func() + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer } // csAttempt implements a single transport stream attempt within a @@ -613,12 +607,7 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - for _, op := range cs.replayBuffer { - if op.cleanup != nil { - op.cleanup() - } - } - cs.replayBuffer = nil + cs.buffer = nil } func (cs *clientStream) commitAttempt() { @@ -743,7 +732,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in replayBuffer always sets cs.attempt + // Note that the first op in the replay buffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -772,7 +761,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.replayBuffer) == 0 { + if len(cs.buffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -862,26 +851,25 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.replayBuffer { - if err := f.op(attempt); err != nil { + for _, f := range cs.buffer { + if err := f(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.replayBufferSize += sz - if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() - cleanup() return } - cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) + cs.buffer = append(cs.buffer, op) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -903,50 +891,23 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) if err != nil { return err } - defer func() { - data.Free() - // only free payload if compression was made, and therefore it is a different set - // of buffers from data. - if pf.isCompressed() { - payload.Free() - } - }() - - dataLen := data.Len() - payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if payloadLen > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - - // always take an extra ref in case data == payload (i.e. when the data isn't - // compressed). The original ref will always be freed by the deferred free above. - payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, dataLen, payloadLen) - } - - // onSuccess is invoked when the op is captured for a subsequent retry. If the - // stream was established by a previous message and therefore retries are - // disabled, onSuccess will not be invoked, and payloadRef can be freed - // immediately. 
- onSuccessCalled := false - err = cs.withRetry(op, func() { - cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) - onSuccessCalled = true - }) - if !onSuccessCalled { - payload.Free() + return a.sendMsg(m, hdr, payload, data) } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data.Materialize(), + Message: data, } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -963,7 +924,6 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} - defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -971,7 +931,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes.Materialize(), + Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -998,7 +958,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1074,7 +1034,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1092,10 +1052,8 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } return io.EOF } - if len(a.statsHandlers) != 0 { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) - } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1107,7 +1065,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} - defer payInfo.free() } if !a.decompSet { @@ -1126,7 +1083,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1145,12 +1103,14 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: payInfo.uncompressedBytes.Len(), + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1162,12 +1122,14 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success - } else if err != nil { - return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + return toRPCErr(err) } func (a *csAttempt) finish(err error) { @@ -1223,12 +1185,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newNonRetryClientStream creates a ClientStream with the specified transport, on the +// newClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transport. +// of using ac.transpot. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1314,7 +1276,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1411,26 +1373,17 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) if err != nil { return err } - defer func() { - data.Free() - // only free payload if compression was made, and therefore it is a different set - // of buffers from data. - if pf.isCompressed() { - payload.Free() - } - }() - // TODO(dfawley): should we be checking len(data) instead? - if payload.Len() > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1470,7 +1423,8 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
as.decompSet = true } - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1490,12 +1444,14 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success - } else if err != nil { - return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + return toRPCErr(err) } func (as *addrConnStream) finish(err error) { @@ -1689,31 +1645,18 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { return err } - defer func() { - data.Free() - // only free payload if compression was made, and therefore it is a different set - // of buffers from data. - if pf.isCompressed() { - payload.Free() - } - }() - - dataLen := data.Len() - payloadLen := payload.Len() - // TODO(dfawley): should we be checking len(data) instead? - if payloadLen > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1726,7 +1669,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data.Materialize(), + Message: data, } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1734,7 +1677,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) } } return nil @@ -1771,9 +1714,8 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} - defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1791,9 +1733,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - Length: payInfo.uncompressedBytes.Len(), + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1801,7 +1745,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes.Materialize(), + Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1816,26 +1760,23 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data using the compressors passed or -// using the passed preparedmsg. The returned boolean indicates whether -// compression was made and therefore whether the payload needs to be freed in -// addition to the returned data. Freeing the payload if the returned boolean is -// false can lead to undefined behavior. -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, 0, err + return nil, nil, nil, err } - compData, pf, err := compress(data, cp, comp, pool) + compData, err := compress(data, cp, comp) if err != nil { - data.Free() - return nil, nil, nil, 0, err + return nil, nil, nil, err } - hdr, payload = msgHeader(data, compData, pf) - return hdr, data, payload, pf, nil + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go index e6eb4feebb99..3f77f4876eb8 100644 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -109,7 +109,7 @@ type pipe struct { mu sync.Mutex // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respectively. + // with r and w pointing to the offset to read and write, respsectively. // // Data is read between [r, w) and written to [w, r), wrapping around the end // of the slice if necessary. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1ffec6e2cee2..bafaef99be98 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.66.0" +const Version = "1.65.0" diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go index 578e1278970d..cb022b45de18 100644 --- a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go +++ b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go @@ -58,7 +58,7 @@ func (t *tlsCredsBuilder) Name() string { } // googleDefaultCredsBuilder implements the `Credentials` interface defined in -// package `xds/bootstrap` and encapsulates a Google Default credential. +// package `xds/boostrap` and encapsulates a Google Default credential. type googleDefaultCredsBuilder struct{} func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go index 3d8398a72ff0..6266f60e86d9 100644 --- a/vendor/google.golang.org/grpc/xds/csds/csds.go +++ b/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -27,6 +27,7 @@ import ( "context" "fmt" "io" + "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" @@ -54,14 +55,22 @@ func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger // https://github.com/grpc/proposal/blob/master/A40-csds-support.md. type ClientStatusDiscoveryServer struct { logger *internalgrpclog.PrefixLogger + + mu sync.Mutex + xdsClient xdsclient.XDSClient + xdsClientClose func() } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server // that can be registered on a gRPC server. 
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - s := &ClientStatusDiscoveryServer{} + c, close, err := xdsclient.New() + if err != nil { + logger.Warningf("Failed to create xDS client: %v", err) + } + s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close} s.logger = prefixLogger(s) - s.logger.Infof("Created CSDS server") + s.logger.Infof("Created CSDS server, with xdsClient %p", c) return s, nil } @@ -95,14 +104,24 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.xdsClient == nil { + return &v3statuspb.ClientStatusResponse{}, nil + } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. if len(req.NodeMatchers) != 0 { return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } - return xdsclient.DumpResources(), nil + return s.xdsClient.DumpResources() } // Close cleans up the resources. -func (s *ClientStatusDiscoveryServer) Close() {} +func (s *ClientStatusDiscoveryServer) Close() { + if s.xdsClientClose != nil { + s.xdsClientClose() + } +} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 936bf2da3274..6ab7fb03f2dc 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -26,7 +26,6 @@ package googledirectpath import ( - "encoding/json" "fmt" "math/rand" "net/url" @@ -38,6 +37,7 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/xdsclient" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. ) @@ -46,21 +46,30 @@ const ( c2pScheme = "google-c2p" c2pAuthority = "traffic-director-c2p.xds.googleapis.com" - tdURL = "dns:///directpath-pa.googleapis.com" - zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" - ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" - ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" - httpReqTimeout = 10 * time.Second + tdURL = "dns:///directpath-pa.googleapis.com" + httpReqTimeout = 10 * time.Second + zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" + ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" + + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" + ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" + + logPrefix = "[google-c2p-resolver]" - logPrefix = "[google-c2p-resolver]" dnsName, xdsName = "dns", "xds" ) // For overriding in unittests. 
var ( - onGCE = googlecloud.OnGCE - randInt = rand.Int - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) + onGCE = googlecloud.OnGCE + + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewWithConfig(config) + } + + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) ) func init() { @@ -99,18 +108,23 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts xdsServerCfg := newXdsServerConfig(xdsServerURI) authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) - cfg := map[string]any{ - "xds_servers": []any{xdsServerCfg}, - "client_default_listener_resource_name_template": "%s", - "authorities": authoritiesCfg, - "node": nodeCfg, - } - cfgJSON, err := json.Marshal(cfg) + config, err := bootstrap.NewConfigFromContents([]byte(fmt.Sprintf(` + { + "xds_servers": [%s], + "client_default_listener_resource_name_template": "%%s", + "authorities": %s, + "node": %s + }`, xdsServerCfg, authoritiesCfg, nodeCfg))) + if err != nil { - return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err) + return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) } - if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil { - return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err) + + // Create singleton xds client with this config. The xds client will be + // used by the xds resolver later. + _, close, err := newClientWithConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to start xDS client: %v", err) } t = resolver.Target{ @@ -120,36 +134,66 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts Path: t.URL.Path, }, } - return resolver.Get(xdsName).Build(t, cc, opts) + xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) + if err != nil { + close() + return nil, err + } + return &c2pResolver{ + Resolver: xdsR, + clientCloseFunc: close, + }, nil } func (b c2pResolverBuilder) Scheme() string { return c2pScheme } -func newNodeConfig(zone string, ipv6Capable bool) map[string]any { - node := map[string]any{ - "id": fmt.Sprintf("C2P-%d", randInt()), - "locality": map[string]any{"zone": zone}, - } +type c2pResolver struct { + resolver.Resolver + clientCloseFunc func() +} + +func (r *c2pResolver) Close() { + r.Resolver.Close() + r.clientCloseFunc() +} + +var id = fmt.Sprintf("C2P-%d", rand.Int()) + +func newNodeConfig(zone string, ipv6Capable bool) string { + metadata := "" if ipv6Capable { - node["metadata"] = map[string]any{ipv6CapableMetadataName: true} + metadata = fmt.Sprintf(`, "metadata": { "%s": true }`, ipv6CapableMetadataName) } - return node + + return fmt.Sprintf(` + { + "id": "%s", + "locality": { + "zone": "%s" + } + %s + }`, id, zone, metadata) } -func newAuthoritiesConfig(serverCfg map[string]any) map[string]any { - return map[string]any{ - c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}}, +func newAuthoritiesConfig(xdsServer string) string { + return fmt.Sprintf(` + { + "%s": { + "xds_servers": [%s] + } } + `, c2pAuthority, xdsServer) } -func newXdsServerConfig(uri string) map[string]any { - return map[string]any{ - "server_uri": uri, - "channel_creds": []map[string]any{{"type": "google_default"}}, - "server_features": []any{"ignore_resource_deletion"}, - } +func newXdsServerConfig(xdsServerURI string) string { + return fmt.Sprintf(` + { + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + 
"server_features": ["xds_v3", "ignore_resource_deletion", "xds.config.resource-in-sotw"] + }`, xdsServerURI) } // runDirectPath returns whether this resolver should use direct path. diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index df879722046e..8e97e104ed4b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -207,7 +207,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e } // A root provider is required whether we are using TLS or mTLS. - cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs() + cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true) if err != nil { return err @@ -309,8 +309,8 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro b.lbCfg = lbCfg // Handle the update in a blocking fashion. - errCh := make(chan error, 1) - callback := func(context.Context) { + done := make(chan struct{}) + ok = b.serializer.Schedule(func(context.Context) { // A config update with a changed top-level cluster name means that none // of our old watchers make any sense any more. b.closeAllWatchers() @@ -319,20 +319,20 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // could end up creating more watchers if turns out to be an aggregate // cluster. b.createAndAddWatcherForCluster(lbCfg.ClusterName) - errCh <- nil - } - onFailure := func() { + close(done) + }) + if !ok { // The call to Schedule returns false *only* if the serializer has been // closed, which happens only when we receive an update after close. - errCh <- errBalancerClosed + return errBalancerClosed } - b.serializer.ScheduleOr(callback, onFailure) - return <-errCh + <-done + return nil } // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { - b.serializer.TrySchedule(func(context.Context) { + b.serializer.Schedule(func(context.Context) { // Resource not found error is reported by the resolver when the // top-level cluster resource is removed by the management server. if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { @@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } -// Closes all registered cluster watchers and removes them from the internal map. +// Closes all registered cluster wathers and removes them from the internal map. // // Only executed in the context of a serializer callback. func (b *cdsBalancer) closeAllWatchers() { @@ -364,7 +364,7 @@ func (b *cdsBalancer) closeAllWatchers() { // Close cancels the CDS watch, closes the child policy and closes the // cdsBalancer. 
func (b *cdsBalancer) Close() { - b.serializer.TrySchedule(func(ctx context.Context) { + b.serializer.Schedule(func(ctx context.Context) { b.closeAllWatchers() if b.childLB != nil { @@ -384,7 +384,7 @@ func (b *cdsBalancer) Close() { } func (b *cdsBalancer) ExitIdle() { - b.serializer.TrySchedule(func(context.Context) { + b.serializer.Schedule(func(context.Context) { if b.childLB == nil { b.logger.Warningf("Received ExitIdle with no child policy") return diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go index 39dce3d56a2a..0b0d168376d7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -32,19 +32,22 @@ type clusterWatcher struct { parent *cdsBalancer } -func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.DoneNotifier) { - handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone.OnDone() } - cw.parent.serializer.ScheduleOr(handleUpdate, onDone.OnDone) +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterUpdate(cw.name, u.Resource) + }) } -func (cw *clusterWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { - handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone.OnDone() } - cw.parent.serializer.ScheduleOr(handleError, onDone.OnDone) +func (cw *clusterWatcher) OnError(err error) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterError(cw.name, err) + }) } -func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { - handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone.OnDone() } - cw.parent.serializer.ScheduleOr(handleNotFound, onDone.OnDone) +func (cw *clusterWatcher) OnResourceDoesNotExist() { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterResourceNotFound(cw.name) + }) } // watcherState groups the state associated with a clusterWatcher. 
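The cdsbalancer and cluster_watcher hunks above revert from the newer serializer API (TrySchedule, and ScheduleOr with an explicit onFailure callback plus an onDone notifier) back to Schedule, which returns false once the serializer has been closed. The following is a minimal, self-contained Go sketch of the two idioms; the toy serializer and every name in it are hypothetical stand-ins that only mirror what the hunks above show, not the actual gRPC implementation.

package main

import (
	"context"
	"fmt"
	"sync"
)

// toySerializer stands in for the callback serializer used by the balancers.
type toySerializer struct {
	mu     sync.Mutex
	closed bool
}

// Schedule runs f (synchronously here, for brevity) and returns false only
// if the serializer has already been closed; callers must check the result.
func (s *toySerializer) Schedule(f func(context.Context)) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return false
	}
	f(context.Background())
	return true
}

// ScheduleOr guarantees that either f or onFailure runs, so a completion
// signal (such as an onDone notifier) is never lost even after close.
func (s *toySerializer) ScheduleOr(f func(context.Context), onFailure func()) {
	if !s.Schedule(f) {
		onFailure()
	}
}

func main() {
	s := &toySerializer{}

	// Older idiom restored by this diff: check Schedule's boolean result and
	// treat false as "balancer closed".
	done := make(chan struct{})
	if ok := s.Schedule(func(context.Context) { close(done) }); !ok {
		fmt.Println("serializer closed")
		return
	}
	<-done
	fmt.Println("update handled via Schedule")

	// Newer idiom being reverted: ScheduleOr with an explicit failure path.
	s.closed = true
	s.ScheduleOr(
		func(context.Context) { fmt.Println("update handled via ScheduleOr") },
		func() { fmt.Println("serializer closed; failure callback still runs") },
	)
}

The practical difference is that ScheduleOr forces the caller to name a failure path, which is what allows the newer code to guarantee that onDone notifiers fire even when the balancer is already closed; the restored Schedule variant pushes that responsibility onto each call site, as the reverted UpdateClientConnState hunk above illustrates.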
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index 9058f0d01fc8..164f3099d280 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -53,8 +52,6 @@ const ( defaultRequestCountMax = 1024 ) -var connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) - func init() { balancer.Register(bb{}) } @@ -363,35 +360,22 @@ func (scw *scWrapper) localityID() xdsinternal.LocalityID { func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID for i, addr := range addrs { newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) } var sc balancer.SubConn - scw := &scWrapper{} oldListener := opts.StateListener - opts.StateListener = func(state balancer.SubConnState) { - b.updateSubConnState(sc, state, oldListener) - if state.ConnectivityState != connectivity.Ready { - return - } - // Read connected address and call updateLocalityID() based on the connected - // address's locality. https://github.com/grpc/grpc-go/issues/7339 - addr := connectedAddress(state) - lID := xdsinternal.GetLocalityID(addr) - if lID.Empty() { - if b.logger.V(2) { - b.logger.Infof("Locality ID for %s unexpectedly empty", addr) - } - return - } - scw.updateLocalityID(lID) - } + opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state, oldListener) } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { return nil, err } - scw.SubConn = sc - return scw, nil + // Wrap this SubConn in a wrapper, and add it to the map. + ret := &scWrapper{SubConn: sc} + ret.updateLocalityID(lID) + return ret, nil } func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index fbadbb92ba39..d8cb8df1a81c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -19,8 +19,6 @@ package clusterimpl import ( - "context" - v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -98,23 +96,14 @@ func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { } } -func telemetryLabels(ctx context.Context) map[string]string { - if ctx == nil { - return nil - } - labels := stats.GetLabels(ctx) - if labels == nil { - return nil - } - return labels.TelemetryLabels -} - func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Unconditionally set labels if present, even dropped or queued RPC's can // use these labels. 
- if labels := telemetryLabels(info.Ctx); labels != nil { - for key, value := range d.telemetryLabels { - labels[key] = value + if info.Ctx != nil { + if labels := stats.GetLabels(info.Ctx); labels != nil && labels.TelemetryLabels != nil { + for key, value := range d.telemetryLabels { + labels.TelemetryLabels[key] = value + } } } @@ -167,10 +156,6 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return pr, err } - if labels := telemetryLabels(info.Ctx); labels != nil { - labels["grpc.lb.locality"] = lIDStr - } - if d.loadStore != nil { d.loadStore.CallStarted(lIDStr) oldDone := pr.Done diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index 75bb847fb18d..83ead92a4a69 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -207,6 +207,11 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // handleResourceUpdate handles a resource update or error from the resource // resolver by propagating the same to the child LB policy. func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, false) + return + } + b.watchUpdateReceived = true b.priorities = update.priorities @@ -214,10 +219,6 @@ func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { // for all configured discovery mechanisms ordered by priority. This is used // to generate configuration for the priority LB policy. b.updateChildConfig() - - if update.onDone != nil { - update.onDone.OnDone() - } } // updateChildConfig builds child policy configuration using endpoint addresses diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 37287913c087..151c54dae6d0 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -30,14 +30,8 @@ import ( // resourceUpdate is a combined update from all the resources, in the order of // priority. For example, it can be {EDS, EDS, DNS}. type resourceUpdate struct { - // A discovery mechanism would return an empty update when it runs into - // errors, and this would result in the priority LB policy reporting - // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would - // fallback to the next highest priority that is available. priorities []priorityConfig - // To be invoked once the update is completely processed, or is dropped in - // favor of a newer update. - onDone xdsresource.DoneNotifier + err error } // topLevelResolver is used by concrete endpointsResolver implementations for @@ -45,11 +39,7 @@ type resourceUpdate struct { // interface and takes appropriate actions upon receipt of updates and errors // from underlying concrete resolvers. type topLevelResolver interface { - // onUpdate is called when a new update is received from the underlying - // endpointsResolver implementation. The onDone callback is to be invoked - // once the update is completely processed, or is dropped in favor of a - // newer update. 
- onUpdate(onDone xdsresource.DoneNotifier) + onUpdate() } // endpointsResolver wraps the functionality to resolve a given resource name to @@ -215,7 +205,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { } // Regenerate even if there's no change in discovery mechanism, in case // priority order changed. - rr.generateLocked(xdsresource.NopDoneNotifier{}) + rr.generateLocked() } // resolveNow is typically called to trigger re-resolve of DNS. The EDS @@ -262,10 +252,7 @@ func (rr *resourceResolver) stop(closing bool) { // after they are stopped. Therefore, we don't have to worry about another // write to this channel happening at the same time as this one. select { - case ru := <-rr.updateChannel: - if ru.onDone != nil { - ru.onDone.OnDone() - } + case <-rr.updateChannel: default: } rr.updateChannel <- &resourceUpdate{} @@ -275,20 +262,14 @@ func (rr *resourceResolver) stop(closing bool) { // result on the update channel if all child resolvers have received at least // one update. Otherwise it returns early. // -// The onDone callback is invoked inline if not all child resolvers have -// received at least one update. If all child resolvers have received at least -// one update, onDone is invoked when the combined update is processed by the -// clusterresolver LB policy. -// -// Caller must hold rr.mu. -func (rr *resourceResolver) generateLocked(onDone xdsresource.DoneNotifier) { +// caller must hold rr.mu. +func (rr *resourceResolver) generateLocked() { var ret []priorityConfig for _, rDM := range rr.children { u, ok := rDM.r.lastUpdate() if !ok { // Don't send updates to parent until all resolvers have update to // send. - onDone.OnDone() return } switch uu := u.(type) { @@ -299,23 +280,16 @@ func (rr *resourceResolver) generateLocked(onDone xdsresource.DoneNotifier) { } } select { - // A previously unprocessed update is dropped in favor of the new one, and - // the former's onDone callback is invoked to unblock the xDS client's - // receive path. 
- case ru := <-rr.updateChannel: - if ru.onDone != nil { - ru.onDone.OnDone() - } + case <-rr.updateChannel: default: } - rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} + rr.updateChannel <- &resourceUpdate{priorities: ret} } -func (rr *resourceResolver) onUpdate(onDone xdsresource.DoneNotifier) { - handleUpdate := func(context.Context) { +func (rr *resourceResolver) onUpdate() { + rr.serializer.Schedule(func(context.Context) { rr.mu.Lock() - rr.generateLocked(onDone) + rr.generateLocked() rr.mu.Unlock() - } - rr.serializer.ScheduleOr(handleUpdate, func() { onDone.OnDone() }) + }) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index b22810e22080..efdc3088a395 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -80,7 +79,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) } ret.updateReceived = true - ret.topLevelResolver.onUpdate(xdsresource.NopDoneNotifier{}) + ret.topLevelResolver.onUpdate() return ret } @@ -90,7 +89,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) } ret.updateReceived = true - ret.topLevelResolver.onUpdate(xdsresource.NopDoneNotifier{}) + ret.topLevelResolver.onUpdate() return ret } ret.dnsR = r @@ -154,7 +153,7 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate(xdsresource.NopDoneNotifier{}) + dr.topLevelResolver.onUpdate() return nil } @@ -177,7 +176,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate(xdsresource.NopDoneNotifier{}) + dr.topLevelResolver.onUpdate() } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 16192045815c..3d0ec356e93a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -76,9 +76,8 @@ func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelR } // OnUpdate is invoked to report an update for the resource being watched. 
-func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.DoneNotifier) { +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { if er.stopped.HasFired() { - onDone.OnDone() return } @@ -86,12 +85,11 @@ func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceD er.update = &update.Resource er.mu.Unlock() - er.topLevelResolver.onUpdate(onDone) + er.topLevelResolver.onUpdate() } -func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.DoneNotifier) { +func (er *edsDiscoveryMechanism) OnError(err error) { if er.stopped.HasFired() { - onDone.OnDone() return } @@ -104,7 +102,6 @@ func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.DoneNotif // Continue using a previously received good configuration if one // exists. er.mu.Unlock() - onDone.OnDone() return } @@ -117,12 +114,11 @@ func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.DoneNotif er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate(onDone) + er.topLevelResolver.onUpdate() } -func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { if er.stopped.HasFired() { - onDone.OnDone() return } @@ -140,5 +136,5 @@ func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.DoneN er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate(onDone) + er.topLevelResolver.onUpdate() } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go index f5605df83276..8ce958d71ca8 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -36,7 +36,7 @@ func NewWrapper() *Wrapper { // update its internal perCluster store so that new stats will be added to the // correct perCluster. // -// Note that this struct is a temporary workaround before we implement graceful +// Note that this struct is a temporary walkaround before we implement graceful // switch for EDS. Any update to the clusterName and serviceName is too early, // the perfect timing is when the picker is updated with the new connection. // This early update could cause picks for the old SubConn being reported to the diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index c17c62f23a59..988ca280789e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -270,7 +270,6 @@ func (b *priorityBalancer) run() { // deadlock. b.mu.Lock() if b.done.HasFired() { - b.mu.Unlock() return } switch s := u.(type) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go index 0be807c134a1..4655bf418474 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -83,9 +83,7 @@ var ( // Caller must hold b.mu. 
func (b *priorityBalancer) syncPriority(childUpdating string) { if b.inhibitPickerUpdates { - if b.logger.V(2) { - b.logger.Infof("Skipping update from child policy %q", childUpdating) - } + b.logger.Debugf("Skipping update from child policy %q", childUpdating) return } for p, name := range b.priorities { @@ -101,16 +99,12 @@ func (b *priorityBalancer) syncPriority(childUpdating string) { (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != child.name || child.name == childUpdating { - if b.logger.V(2) { - b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name) - } + b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) // If we switch children or the child in use just updated its // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } - if b.logger.V(2) { - b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p) - } + b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p) b.switchToChild(child, p) break } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go index 45dbb2d2a83f..eac89b5b4d05 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -67,15 +67,11 @@ type ringEntry struct { // // Must be called with a non-empty subConns map. func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { - if logger.V(2) { - logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) - } + logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 normalizedWeights, minWeight := normalizeWeights(subConns) - if logger.V(2) { - logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights) - } + logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights) // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. @@ -86,9 +82,7 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) ringSize := math.Ceil(scale) items := make([]*ringEntry, 0, int(ringSize)) - if logger.V(2) { - logger.Infof("newRing: creating new ring of size %v", ringSize) - } + logger.Debugf("newRing: creating new ring of size %v", ringSize) // For each entry, scale*weight nodes are generated in the ring. // diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go index f17977e6d084..37de3a39b64f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configured." 
- A41 + // completely ignored, as if RBAC was not configurated." - A41 if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { return config{}, nil } @@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configured." - A41 + // completely ignored, as if RBAC was not configurated." - A41 if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) } @@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configured." - A41 + // completely ignored, as if RBAC was not configurated." - A41 if c.chainEngine == nil { return nil, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index 1d8a6b03f1b3..7091990500f9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -55,11 +55,6 @@ func (l LocalityID) Equal(o any) bool { return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone } -// Empty returns whether or not the locality ID is empty. -func (l LocalityID) Empty() bool { - return l.Region == "" && l.Zone == "" && l.SubZone == "" -} - // LocalityIDFromString converts a json representation of locality, into a // LocalityID struct. func LocalityIDFromString(s string) (ret LocalityID, _ error) { diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go index d9c23278281f..f505eeb4394e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go @@ -26,5 +26,5 @@ var ( NewWRR any // func() wrr.WRR // NewXDSClient is the function used to create a new xDS client. - NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error) + NewXDSClient any // func() (xdsclient.XDSClient, func(), error) ) diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index aec81489e5fc..f5bfc500c11a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -182,7 +182,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if v := atomic.AddInt32(ref, -1); v == 0 { // This entry will be removed from activeClusters when // producing the service config for the empty update. - cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.serializer.Schedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } @@ -326,7 +326,7 @@ func (cs *configSelector) stop() { // selector; we need another update to delete clusters from the config (if // we don't have another update pending already). 
if needUpdate { - cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.serializer.Schedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index b64f40c03939..abb3c2c5acf1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -36,19 +36,22 @@ func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatch return lw } -func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.DoneNotifier) { - handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone.OnDone() } - l.parent.serializer.ScheduleOr(handleUpdate, onDone.OnDone) +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { + l.parent.serializer.Schedule(func(context.Context) { + l.parent.onListenerResourceUpdate(update.Resource) + }) } -func (l *listenerWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { - handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone.OnDone() } - l.parent.serializer.ScheduleOr(handleError, onDone.OnDone) +func (l *listenerWatcher) OnError(err error) { + l.parent.serializer.Schedule(func(context.Context) { + l.parent.onListenerResourceError(err) + }) } -func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { - handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone.OnDone() } - l.parent.serializer.ScheduleOr(handleNotFound, onDone.OnDone) +func (l *listenerWatcher) OnResourceDoesNotExist() { + l.parent.serializer.Schedule(func(context.Context) { + l.parent.onListenerResourceNotFound() + }) } func (l *listenerWatcher) stop() { @@ -68,22 +71,22 @@ func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfi return rw } -func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.DoneNotifier) { - handleUpdate := func(context.Context) { - r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource) - onDone.OnDone() - } - r.parent.serializer.ScheduleOr(handleUpdate, onDone.OnDone) +func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { + r.parent.serializer.Schedule(func(context.Context) { + r.parent.onRouteConfigResourceUpdate(r.resourceName, update.Resource) + }) } -func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { - handleError := func(context.Context) { r.parent.onRouteConfigResourceError(r.resourceName, err); onDone.OnDone() } - r.parent.serializer.ScheduleOr(handleError, onDone.OnDone) +func (r *routeConfigWatcher) OnError(err error) { + r.parent.serializer.Schedule(func(context.Context) { + r.parent.onRouteConfigResourceError(r.resourceName, err) + }) } -func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { - handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone.OnDone() } - r.parent.serializer.ScheduleOr(handleNotFound, onDone.OnDone) +func (r *routeConfigWatcher) OnResourceDoesNotExist() { + r.parent.serializer.Schedule(func(context.Context) { + r.parent.onRouteConfigResourceNotFound(r.resourceName) + }) } func (r *routeConfigWatcher) stop() { diff --git 
a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index 8d20d5882c38..40dd97267811 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -49,8 +49,8 @@ const Scheme = "xds" // ClientConns at the same time. func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { - return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) + newXDSClient: func() (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: config}) }, }, nil } @@ -64,7 +64,7 @@ func init() { } type xdsResolverBuilder struct { - newXDSClient func(string) (xdsclient.XDSClient, func(), error) + newXDSClient func() (xdsclient.XDSClient, func(), error) } // Build helps implement the resolver.Builder interface. @@ -97,11 +97,11 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon r.serializerCancel = cancel // Initialize the xDS client. - newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error)) + newXDSClient := rinternal.NewXDSClient.(func() (xdsclient.XDSClient, func(), error)) if b.newXDSClient != nil { newXDSClient = b.newXDSClient } - client, close, err := newXDSClient(target.String()) + client, close, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } @@ -139,13 +139,9 @@ func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts // Find the client listener template to use from the bootstrap config: // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. - template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate() + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate if authority := target.URL.Host; authority != "" { - authorities := bootstrapConfig.Authorities() - if authorities == nil { - return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) - } - a := authorities[authority] + a := bootstrapConfig.Authorities[authority] if a == nil { return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) } diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go index d2a13d75c591..fdba769294de 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -47,7 +47,7 @@ type connWrapper struct { // The specific filter chain picked for handling this connection. filterChain *xdsresource.FilterChain - // A reference to the listenerWrapper on which this connection was accepted. + // A reference fo the listenerWrapper on which this connection was accepted. parent *listenerWrapper // The certificate providers created for this connection. 
@@ -107,7 +107,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil } - cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() + cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs // Identity provider name is mandatory on the server-side, and this is // enforced when the resource is received at the XDSClient layer. secCfg := c.filterChain.SecurityCfg diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index cdbc897f1bc7..174b54c44117 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -410,8 +410,7 @@ type ldsWatcher struct { name string } -func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update) return @@ -422,8 +421,7 @@ func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone lw.parent.handleLDSUpdate(update.Resource) } -func (lw *ldsWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (lw *ldsWatcher) OnError(err error) { if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err) return @@ -435,8 +433,7 @@ func (lw *ldsWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { // continue to use the old configuration. } -func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (lw *ldsWatcher) OnResourceDoesNotExist() { if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name) return diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go index 069db8e5d3d2..67cde4602894 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -147,8 +147,7 @@ type rdsWatcher struct { canceled bool // eats callbacks if true } -func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -161,8 +160,7 @@ func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDo rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource}) } -func (rw *rdsWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (rw *rdsWatcher) OnError(err error) { rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -175,8 +173,7 @@ func (rw *rdsWatcher) OnError(err error, onDone xdsresource.DoneNotifier) { rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err}) } -func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.DoneNotifier) { - defer onDone.OnDone() +func (rw *rdsWatcher) OnResourceDoesNotExist() { rw.mu.Lock() if rw.canceled { rw.mu.Unlock() diff --git 
a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 0e0a4d901316..b0763a024031 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -118,12 +118,12 @@ func newAuthority(args authorityArgs) (*authority, error) { } tr, err := transport.New(transport.Options{ - ServerCfg: args.serverCfg, + ServerCfg: *args.serverCfg, OnRecvHandler: ret.handleResourceUpdate, OnErrorHandler: ret.newConnectionError, OnSendHandler: ret.transportOnSendHandler, Logger: args.logger, - NodeProto: args.bootstrapCfg.Node(), + NodeProto: args.bootstrapCfg.NodeProto, }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) @@ -148,7 +148,7 @@ func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { a.startWatchTimersLocked(rType, u.ResourceNames) } -func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, fc *transport.ADSFlowControl) error { +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { rType := a.resourceTypeGetter(resourceUpdate.URL) if rType == nil { return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) @@ -159,27 +159,14 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate ServerConfig: a.serverCfg, } updates, md, err := decodeAllResources(opts, rType, resourceUpdate) - a.updateResourceStateAndScheduleCallbacks(rType, updates, md, fc) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md) return err } -func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, fc *transport.ADSFlowControl) { +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - // We build a list of callback funcs to invoke, and invoke them at the end - // of this method instead of inline (when handling the update for a - // particular resource), because we want to make sure that all calls to - // `fc.Add` happen before any callbacks are invoked. This will ensure that - // the next read is never attempted before all callbacks are invoked, and - // the watchers have processed the update. 
- funcsToSchedule := []func(context.Context){} - defer func() { - for _, f := range funcsToSchedule { - a.serializer.ScheduleOr(f, fc.OnDone) - } - }() - resourceStates := a.resources[rType] for name, uErr := range updates { if state, ok := resourceStates[name]; ok { @@ -223,8 +210,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher err := uErr.err - fc.Add() - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, fc) }) + a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) } continue } @@ -239,14 +225,11 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher resource := uErr.resource - fc.Add() - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, fc) }) + a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) } } // Sync cache. - if a.logger.V(2) { - a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) - } + a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) state.cache = uErr.resource // Set status to ACK, and clear error state. The metadata might be a // NACK metadata because some other resources in the same response @@ -300,7 +283,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // resource deletion is to be ignored, the resource is not removed from // the cache and the corresponding OnResourceDoesNotExist() callback is // not invoked on the watchers. - if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { + if a.serverCfg.IgnoreResourceDeletion { if !state.deletionIgnored { state.deletionIgnored = true a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) @@ -315,8 +298,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - fc.Add() - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(fc) }) + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) } } } @@ -444,8 +426,8 @@ func (a *authority) newConnectionError(err error) { // Propagate the connection error from the transport layer to all watchers. 
for watcher := range state.watchers { watcher := watcher - a.serializer.TrySchedule(func(context.Context) { - watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), xdsresource.NopDoneNotifier{}) + a.serializer.Schedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) }) } } @@ -472,9 +454,7 @@ func (a *authority) close() { } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - if a.logger.V(2) { - a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) - } + a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -491,9 +471,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // instruct the transport layer to send a DiscoveryRequest for the same. state := resources[resourceName] if state == nil { - if a.logger.V(2) { - a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) - } + a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, @@ -511,7 +489,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) } resource := state.cache - a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, xdsresource.NopDoneNotifier{}) }) + a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) } return func() { @@ -532,9 +510,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. - if a.logger.V(2) { - a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) - } + a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } @@ -564,7 +540,7 @@ func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourc state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(xdsresource.NopDoneNotifier{}) }) + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) } } @@ -590,13 +566,13 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(xdsresource.NopDoneNotifier{}) }) + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) } } // sendDiscoveryRequestLocked sends a discovery request for the specified // resource type and resource names. 
Even though this method does not directly -// access the resource cache, it is important that `resourcesMu` be held when +// access the resource cache, it is important that `resourcesMu` be beld when // calling this method to ensure that a consistent snapshot of resource names is // being requested. func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { @@ -613,7 +589,7 @@ func (a *authority) reportLoad() (*load.Store, func()) { return a.transport.ReportLoad() } -func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { +func (a *authority) dumpResources() ([]*v3statuspb.ClientConfig_GenericXdsConfig, error) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -643,7 +619,7 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig ret = append(ret, config) } } - return ret + return ret, nil } func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 144cb5bd7686..468c5fb31b9b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -24,6 +24,8 @@ import ( "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) // XDSClient is a full fledged gRPC client which queries a set of discovery APIs @@ -46,6 +48,10 @@ type XDSClient interface { // the watcher is canceled. Callers need to handle this case. WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + // DumpResources returns the status of the xDS resources. Returns a map of + // resource type URLs to a map of resource names to resource state. + DumpResources() (*v3statuspb.ClientStatusResponse, error) + ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) BootstrapConfig() *bootstrap.Config diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 6097e86925e6..8dec8f34b209 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -19,7 +19,9 @@ package xdsclient import ( + "bytes" "context" + "encoding/json" "fmt" "sync" "time" @@ -31,33 +33,44 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// NameForServer represents the value to be passed as name when creating an xDS -// client from xDS-enabled gRPC servers. This is a well-known dedicated key -// value, and is defined in gRFC A71. -const NameForServer = "#server" +// New returns a new xDS client configured by the bootstrap file specified in env +// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. +// +// The returned client is a reference counted singleton instance. This function +// creates a new client only when one doesn't already exist. +// +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. 
The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +func New() (XDSClient, func(), error) { + return newRefCountedWithConfig(nil) +} -// New returns an xDS client configured with bootstrap configuration specified -// by the ordered list: -// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP -// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG -// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig +// NewWithConfig is similar to New, except that it uses the provided bootstrap +// configuration to create the xDS client if and only if the bootstrap +// environment variables are not defined. // -// gRPC client implementations are expected to pass the channel's target URI for -// the name field, while server implementations are expected to pass a dedicated -// well-known value "#server", as specified in gRFC A71. The returned client is -// a reference counted implementation shared among callers using the same name. +// The returned client is a reference counted singleton instance. This function +// creates a new client only when one doesn't already exist. // // The second return value represents a close function which releases the // caller's reference on the returned client. The caller is expected to invoke // it once they are done using the client. The underlying client will be closed // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. -func New(name string) (XDSClient, func(), error) { - return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) +// +// # Internal Only +// +// This function should ONLY be used by the internal google-c2p resolver. +// DO NOT use this elsewhere. Use New() instead. +func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) { + return newRefCountedWithConfig(config) } -// newClientImpl returns a new xdsClient with the given config. -func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { +// newWithConfig returns a new xdsClient with the given config. +func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), @@ -71,14 +84,13 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, i } c.logger = prefixLogger(c) + c.logger.Infof("Created client to xDS management server: %s", config.XDSServer) return c, nil } // OptionsForTesting contains options to configure xDS client creation for // testing purposes only. type OptionsForTesting struct { - // Name is a unique name for this xDS client. - Name string // Contents contain a JSON representation of the bootstrap configuration to // be used when creating the xDS client. Contents []byte @@ -102,9 +114,6 @@ type OptionsForTesting struct { // // This function should ONLY be used for testing purposes. 
func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { - if opts.Name == "" { - return nil, nil, fmt.Errorf("opts.Name field must be non-empty") - } if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } @@ -112,32 +121,49 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout } - if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { - return nil, nil, err + // Normalize the input configuration, as this is used as the key in the map + // of xDS clients created for testing. + buf := bytes.Buffer{} + err := json.Indent(&buf, opts.Contents, "", "") + if err != nil { + return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err) } - client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) - return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err -} + opts.Contents = bytes.TrimSpace(buf.Bytes()) -// GetForTesting returns an xDS client created earlier using the given name. -// -// The second return value represents a close function which the caller is -// expected to invoke once they are done using the client. It is safe for the -// caller to invoke this close function multiple times. -// -// # Testing Only -// -// This function should ONLY be used for testing purposes. -func GetForTesting(name string) (XDSClient, func(), error) { clientsMu.Lock() defer clientsMu.Unlock() - c, ok := clients[name] - if !ok { - return nil, nil, fmt.Errorf("xDS client with name %q not found", name) + var client *clientRefCounted + closeFunc := grpcsync.OnceFunc(func() { + clientsMu.Lock() + defer clientsMu.Unlock() + if client.decrRef() == 0 { + client.close() + delete(clients, string(opts.Contents)) + } + }) + + // If an xDS client exists for the given configuration, increment its + // reference count and return it. + if c := clients[string(opts.Contents)]; c != nil { + c.incrRef() + client = c + return c, closeFunc, nil + } + + // Create a new xDS client for the given configuration + bcfg, err := bootstrap.NewConfigFromContents(opts.Contents) + if err != nil { + return nil, nil, fmt.Errorf("bootstrap config %s: %v", string(opts.Contents), err) + } + cImpl, err := newWithConfig(bcfg, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) + if err != nil { + return nil, nil, fmt.Errorf("creating xDS client: %v", err) } - c.incrRef() - return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil + client = &clientRefCounted{clientImpl: cImpl, refCount: 1} + clients[string(opts.Contents)] = client + + return client, closeFunc, nil } func init() { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go deleted file mode 100644 index 9edd0ce7f276..000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "fmt" - "sync/atomic" - "time" - - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" -) - -const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleAuthorityDeleteTimeout = 5 * time.Minute -) - -var ( - // The following functions are no-ops in the actual code, but can be - // overridden in tests to give them visibility into certain events. - xdsClientImplCreateHook = func(name string) {} - xdsClientImplCloseHook = func(name string) {} -) - -func clientRefCountedClose(name string) { - clientsMu.Lock() - defer clientsMu.Unlock() - - client, ok := clients[name] - if !ok { - logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) - return - } - if client.decrRef() != 0 { - return - } - client.clientImpl.close() - xdsClientImplCloseHook(name) - delete(clients, name) - -} - -// newRefCounted creates a new reference counted xDS client implementation for -// name, if one does not exist already. If an xDS client for the given name -// exists, it gets a reference to it and returns it. -func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) { - clientsMu.Lock() - defer clientsMu.Unlock() - - if c := clients[name]; c != nil { - c.incrRef() - return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil - } - - // Create the new client implementation. - config, err := bootstrap.GetConfiguration() - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) - } - c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout) - if err != nil { - return nil, nil, err - } - c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config) - client := &clientRefCounted{clientImpl: c, refCount: 1} - clients[name] = client - xdsClientImplCreateHook(name) - - logger.Infof("xDS node ID: %s", config.Node().GetId()) - return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil -} - -// clientRefCounted is ref-counted, and to be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type clientRefCounted struct { - *clientImpl - - refCount int32 // accessed atomically -} - -func (c *clientRefCounted) incrRef() int32 { - return atomic.AddInt32(&c.refCount, 1) -} - -func (c *clientRefCounted) decrRef() int32 { - return atomic.AddInt32(&c.refCount, -1) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index 9f619016a08e..7321250d6ab2 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -85,16 +85,16 @@ func (c *clientImpl) close() { c.authorityMu.Unlock() c.serializerClose() - for _, s := range c.config.XDSServers() { - for _, f := range s.Cleanups() { - f() - } + for _, f := range c.config.XDSServer.Cleanups { + f() } - for _, a := range c.config.Authorities() { - for _, s := range a.XDSServers { - for _, f := range s.Cleanups() { - f() - } + for _, a := range c.config.Authorities { + if a.XDSServer == nil { + // The server for this authority is the top-level one, cleaned up above. 
+ continue + } + for _, f := range a.XDSServer.Cleanups { + f() } } c.logger.Infof("Shutdown") diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go index 1ce20fabdf83..69db79ee8913 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -45,18 +45,14 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), err return nil, nil, errors.New("the xds-client is closed") } - config := c.config.XDSServers()[0] + config := c.config.XDSServer if scheme == xdsresource.FederationScheme { - authorities := c.config.Authorities() - if authorities == nil { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - cfg, ok := authorities[authority] + cfg, ok := c.config.Authorities[authority] if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) } - if len(cfg.XDSServers) >= 1 { - config = cfg.XDSServers[0] + if cfg.XDSServer != nil { + config = cfg.XDSServer } } @@ -114,7 +110,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth serializer: c.serializer, resourceTypeGetter: c.resourceTypes.get, watchExpiryTimeout: c.watchExpiryTimeout, - logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)), }) if err != nil { return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go index f4d7b0a0115c..8fbc010f743d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go @@ -22,32 +22,27 @@ import ( v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -// dumpResources returns the status and contents of all xDS resources. -func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig { +// DumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) DumpResources() (*v3statuspb.ClientStatusResponse, error) { c.authorityMu.Lock() defer c.authorityMu.Unlock() var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig for _, a := range c.authorities { - retCfg = append(retCfg, a.dumpResources()...) - } - - return &v3statuspb.ClientConfig{ - Node: c.config.Node(), - GenericXdsConfigs: retCfg, + cfg, err := a.dumpResources() + if err != nil { + return nil, err + } + retCfg = append(retCfg, cfg...) } -} -// DumpResources returns the status and contents of all xDS resources. -func DumpResources() *v3statuspb.ClientStatusResponse { - clientsMu.Lock() - defer clientsMu.Unlock() - - resp := &v3statuspb.ClientStatusResponse{} - for key, client := range clients { - cfg := client.dumpResources() - cfg.ClientScope = key - resp.Config = append(resp.Config, cfg) - } - return resp + return &v3statuspb.ClientStatusResponse{ + Config: []*v3statuspb.ClientConfig{ + { + // TODO: Populate ClientScope. Need to update go-control-plane dependency. 
+ Node: c.config.NodeProto, + GenericXdsConfigs: retCfg, + }, + }, + }, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go index b42e43a56976..ff2f5e9d6728 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go @@ -32,7 +32,7 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu a, err := c.newAuthorityLocked(server) if err != nil { c.authorityMu.Unlock() - c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err) + c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) return nil, func() {} } // Hold the ref before starting load reporting. diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index 7a5dddfd2b8b..22b8eb0107c9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -44,7 +44,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, if err := c.resourceTypes.maybeRegister(rType); err != nil { logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) - c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, xdsresource.NopDoneNotifier{}) }) + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -54,7 +54,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, a, unref, err := c.findAuthority(n) if err != nil { logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) - c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, xdsresource.NopDoneNotifier{}) }) + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } cancelF := a.watchResource(rType, n.String(), watcher) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go deleted file mode 100644 index e12610744109..000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package internal contains functionality internal to the xdsclient package. -package internal - -// The following vars can be overridden by tests. -var ( - // NewADSStream is a function that returns a new ADS stream. 
- NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) -) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go index f1e265ee7ddf..1f266ae20185 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go @@ -174,7 +174,6 @@ func (ls *perClusterStore) CallStarted(locality string) { p, _ = ls.localityRPCCount.LoadOrStore(locality, tp) } p.(*rpcCountData).incrInProgress() - p.(*rpcCountData).incrIssued() } // CallFinished adds one call finished record for the given locality. @@ -249,8 +248,6 @@ type RequestData struct { Errored uint64 // InProgress is the number of requests in flight. InProgress uint64 - // Issued is the total number requests that were sent. - Issued uint64 } // ServerLoadData contains server load data. @@ -299,8 +296,7 @@ func (ls *perClusterStore) stats() *Data { succeeded := countData.loadAndClearSucceeded() inProgress := countData.loadInProgress() errored := countData.loadAndClearErrored() - issued := countData.loadAndClearIssued() - if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 { + if succeeded == 0 && inProgress == 0 && errored == 0 { return true } @@ -309,7 +305,6 @@ func (ls *perClusterStore) stats() *Data { Succeeded: succeeded, Errored: errored, InProgress: inProgress, - Issued: issued, }, LoadStats: make(map[string]ServerLoadData), } @@ -344,7 +339,6 @@ type rpcCountData struct { succeeded *uint64 errored *uint64 inProgress *uint64 - issued *uint64 // Map from load desc to load data (sum+count). Loading data from map is // atomic, but updating data takes a lock, which could cause contention when @@ -359,7 +353,6 @@ func newRPCCountData() *rpcCountData { succeeded: new(uint64), errored: new(uint64), inProgress: new(uint64), - issued: new(uint64), } } @@ -391,14 +384,6 @@ func (rcd *rpcCountData) loadInProgress() uint64 { return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. } -func (rcd *rpcCountData) incrIssued() { - atomic.AddUint64(rcd.issued, 1) -} - -func (rcd *rpcCountData) loadAndClearIssued() uint64 { - return atomic.SwapUint64(rcd.issued, 0) -} - func (rcd *rpcCountData) addServerLoad(name string, d float64) { loads, ok := rcd.serverLoads.Load(name) if !ok { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go new file mode 100644 index 000000000000..f981bfebb582 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // This is the client returned by New(). It contains one client implementation, + // and maintains the refcount. + singletonMu sync.Mutex + singletonClient *clientRefCounted + + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. + singletonClientImplCreateHook = func() {} + singletonClientImplCloseHook = func() {} +) + +// To override in tests. +var bootstrapNewConfig = bootstrap.NewConfig + +func clientRefCountedClose() { + singletonMu.Lock() + defer singletonMu.Unlock() + + if singletonClient.decrRef() != 0 { + return + } + singletonClient.clientImpl.close() + singletonClientImplCloseHook() + singletonClient = nil +} + +func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) { + singletonMu.Lock() + defer singletonMu.Unlock() + + if singletonClient != nil { + singletonClient.incrRef() + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil + + } + + // Use fallbackConfig only if bootstrap env vars are unspecified. + var config *bootstrap.Config + if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" { + if fallbackConfig == nil { + return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil") + } + config = fallbackConfig + } else { + var err error + config, err = bootstrapNewConfig() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) + } + } + + // Create the new client implementation. + c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, nil, err + } + singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} + singletonClientImplCreateHook() + + logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go deleted file mode 100644 index 9acc33cbbf8d..000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package internal contains functionality internal to the transport package. -package internal - -// The following vars can be overridden by tests. -var ( - // GRPCNewClient creates a new gRPC Client. - GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) -) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index e47fdd9846ba..289fd62cbc75 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -223,7 +223,6 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e TotalSuccessfulRequests: localityData.RequestStats.Succeeded, TotalRequestsInProgress: localityData.RequestStats.InProgress, TotalErrorRequests: localityData.RequestStats.Errored, - TotalIssuedRequests: localityData.RequestStats.Issued, LoadMetricStats: loadMetricStats, UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. }) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 6f156398b9bd..421ba78074c0 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -24,7 +24,6 @@ import ( "errors" "fmt" "sync" - "sync/atomic" "time" "google.golang.org/grpc" @@ -36,9 +35,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/keepalive" - xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" "google.golang.org/grpc/xds/internal/xdsclient/load" - transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" @@ -48,23 +45,17 @@ import ( statuspb "google.golang.org/genproto/googleapis/rpc/status" ) -type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -func init() { - transportinternal.GRPCNewClient = grpc.NewClient - xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) - } -} - // Any per-RPC level logs which print complete request or response messages // should be gated at this verbosity level. Other per-RPC level logs which print -// terse output should be at `INFO` and verbosity 2. +// terse output should be at `INFO` and verbosity 2, which corresponds to using +// the `Debugf` method on the logger. const perRPCVerbosityLevel = 9 +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + // Transport provides a resource-type agnostic implementation of the xDS // transport protocol. 
At this layer, resource contents are supposed to be -// opaque blobs which should be meaningful only to the xDS data model layer +// opaque blobs which should be be meaningful only to the xDS data model layer // which is implemented by the `xdsresource` package. // // Under the hood, it owns the gRPC connection to a single management server and @@ -86,7 +77,7 @@ type Transport struct { lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. // These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchronous events resulting from + // spawned by the transport, and between asynchorous events resulting from // receipt of responses from the management server. adsStreamCh chan adsStream // New ADS streams are pushed here. adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. @@ -121,11 +112,7 @@ type Transport struct { // cause the transport layer to send an ACK to the management server. A non-nil // error is returned from this function when the data model layer believes // otherwise, and this will cause the transport layer to send a NACK. -// -// The implementation is expected to use the ADS flow control object passed to -// it, and increment the number of watchers to whom the update is sent to, and -// eventually decrement the number once the update is consumed by the watchers. -type OnRecvHandlerFunc func(update ResourceUpdate, fc *ADSFlowControl) error +type OnRecvHandlerFunc func(update ResourceUpdate) error // OnSendHandlerFunc is the implementation at the authority, which handles state // changes for the resource watch and stop watch timers accordingly. @@ -148,7 +135,7 @@ type ResourceUpdate struct { type Options struct { // ServerCfg contains all the configuration required to connect to the xDS // management server. - ServerCfg *bootstrap.ServerConfig + ServerCfg bootstrap.ServerConfig // OnRecvHandler is the component which makes ACK/NACK decisions based on // the received resources. // @@ -182,9 +169,16 @@ type Options struct { NodeProto *v3corepb.Node } +// For overriding in unit tests. +var grpcDial = grpc.Dial + // New creates a new Transport. func New(opts Options) (*Transport, error) { switch { + case opts.ServerCfg.ServerURI == "": + return nil, errors.New("missing server URI when creating a new transport") + case opts.ServerCfg.CredsDialOption() == nil: + return nil, errors.New("missing credentials when creating a new transport") case opts.OnRecvHandler == nil: return nil, errors.New("missing OnRecv callback handler when creating a new transport") case opts.OnErrorHandler == nil: @@ -203,13 +197,11 @@ func New(opts Options) (*Transport, error) { Timeout: 20 * time.Second, }), } - grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) - cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) + cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. 
- return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err) } - cc.Connect() boff := opts.Backoff if boff == nil { @@ -217,7 +209,7 @@ func New(opts Options) (*Transport, error) { } ret := &Transport{ cc: cc, - serverURI: opts.ServerCfg.ServerURI(), + serverURI: opts.ServerCfg.ServerURI, onRecvHandler: opts.OnRecvHandler, onErrorHandler: opts.OnErrorHandler, onSendHandler: opts.OnSendHandler, @@ -271,6 +263,12 @@ func (t *Transport) SendRequest(url string, resources []string) { }) } +func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + // The transport retries the stream with an exponential backoff whenever the + // stream breaks without ever having seen a response. + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) +} + // ResourceSendInfo wraps the names and url of resources sent to the management // server. This is used by the `authority` type to start/stop the watch timer // associated with every resource in the update. @@ -300,9 +298,7 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, send if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) } else { - if t.logger.V(2) { - t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) - } + t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) } t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) return nil @@ -315,8 +311,8 @@ func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (re } if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) - } else if t.logger.V(2) { - t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } else { + t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) } return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil } @@ -332,8 +328,7 @@ func (t *Transport) adsRunner(ctx context.Context) { // We reset backoff state when we successfully receive at least one // message from the server. runStreamWithBackoff := func() error { - newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) - stream, err := newStream(ctx, t.cc) + stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) if err != nil { t.onErrorHandler(err) t.logger.Warningf("Creating new ADS stream failed: %v", err) @@ -346,7 +341,7 @@ func (t *Transport) adsRunner(ctx context.Context) { default: } t.adsStreamCh <- stream - msgReceived := t.recv(ctx, stream) + msgReceived := t.recv(stream) if msgReceived { return backoff.ErrResetBackoff } @@ -466,21 +461,9 @@ func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err erro // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. Returns true if at least one message was // successfully received. 
-func (t *Transport) recv(ctx context.Context, stream adsStream) bool { - // Initialize the flow control quota for the stream. This helps to block the - // next read until the previous one is consumed by all watchers. - fc := NewADSStreamFlowControl() - +func (t *Transport) recv(stream adsStream) bool { msgReceived := false for { - // Wait for ADS stream level flow control to be available. - if !fc.Wait(ctx) { - if t.logger.V(2) { - t.logger.Infof("ADS stream context canceled") - } - return msgReceived - } - resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { // Note that we do not consider it an error if the ADS stream was closed @@ -498,12 +481,12 @@ func (t *Transport) recv(ctx context.Context, stream adsStream) bool { } msgReceived = true - u := ResourceUpdate{ + err = t.onRecvHandler(ResourceUpdate{ Resources: resources, URL: url, Version: rVersion, - } - if err = t.onRecvHandler(u, fc); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + }) + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { t.logger.Warningf("%v", err) continue } @@ -529,9 +512,7 @@ func (t *Transport) recv(ctx context.Context, stream adsStream) bool { stream: stream, version: rVersion, }) - if t.logger.V(2) { - t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) - } + t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) } } @@ -637,73 +618,3 @@ func (t *Transport) Close() { func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { return t.cc.GetState() } - -// ADSFlowControl implements ADS stream level flow control that enables the -// transport to block the reading of the next message off of the stream until -// the previous update is consumed by all watchers. -// -// The lifetime of the flow control is tied to the lifetime of the stream. -// -// New instances must be created with a call to NewADSStreamFlowControl. -type ADSFlowControl struct { - logger *grpclog.PrefixLogger - - // Count of watchers yet to consume the most recent update. - pending atomic.Int64 - // Channel used to notify when all the watchers have consumed the most - // recent update. Wait() blocks on reading a value from this channel. - readyCh chan struct{} -} - -// NewADSStreamFlowControl returns a new ADSFlowControl. -func NewADSStreamFlowControl() *ADSFlowControl { - return &ADSFlowControl{readyCh: make(chan struct{}, 1)} -} - -// Add increments the number of watchers (by one) who are yet to consume the -// most recent update received on the ADS stream. -func (fc *ADSFlowControl) Add() { - fc.pending.Add(1) -} - -// Wait blocks until all the watchers have consumed the most recent update and -// returns true. If the context expires before that, it returns false. -func (fc *ADSFlowControl) Wait(ctx context.Context) bool { - // If there are no watchers or none with pending updates, there is no need - // to block. - if n := fc.pending.Load(); n == 0 { - // If all watchers finished processing the most recent update before the - // `recv` goroutine made the next call to `Wait()`, there would be an - // entry in the readyCh channel that needs to be drained to ensure that - // the next call to `Wait()` doesn't unblock before it actually should. 
- select { - case <-fc.readyCh: - default: - } - return true - } - - select { - case <-ctx.Done(): - return false - case <-fc.readyCh: - return true - } -} - -// OnDone indicates that a watcher has consumed the most recent update. -func (fc *ADSFlowControl) OnDone() { - if pending := fc.pending.Add(-1); pending != 0 { - return - } - - select { - // Writes to the readyCh channel should not block ideally. The default - // branch here is to appease the paranoid mind. - case fc.readyCh <- struct{}{}: - default: - if fc.logger.V(2) { - fc.logger.Infof("ADS stream flow control readyCh is full") - } - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index fb6f66f20dbd..5ac7f0312239 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -111,10 +111,7 @@ func (c *ClusterResourceData) Raw() *anypb.Any { // corresponding to the cluster resource being watched. type ClusterWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnUpdate(*ClusterResourceData, DoneNotifier) + OnUpdate(*ClusterResourceData) // OnError is invoked under different error conditions including but not // limited to the following: @@ -124,34 +121,28 @@ type ClusterWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnError(error, DoneNotifier) + OnError(error) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnResourceDoesNotExist(DoneNotifier) + OnResourceDoesNotExist() } type delegatingClusterWatcher struct { watcher ClusterWatcher } -func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, done DoneNotifier) { +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) { c := data.(*ClusterResourceData) - d.watcher.OnUpdate(c, done) + d.watcher.OnUpdate(c) } -func (d *delegatingClusterWatcher) OnError(err error, done DoneNotifier) { - d.watcher.OnError(err, done) +func (d *delegatingClusterWatcher) OnError(err error) { + d.watcher.OnError(err) } -func (d *delegatingClusterWatcher) OnResourceDoesNotExist(done DoneNotifier) { - d.watcher.OnResourceDoesNotExist(done) +func (d *delegatingClusterWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() } // WatchCluster uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 68e3a2548e64..775a8aa19423 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -107,10 +107,7 @@ func (e *EndpointsResourceData) Raw() *anypb.Any { // events corresponding to the endpoints resource being watched. 
type EndpointsWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnUpdate(*EndpointsResourceData, DoneNotifier) + OnUpdate(*EndpointsResourceData) // OnError is invoked under different error conditions including but not // limited to the following: @@ -120,34 +117,28 @@ type EndpointsWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnError(error, DoneNotifier) + OnError(error) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnResourceDoesNotExist(DoneNotifier) + OnResourceDoesNotExist() } type delegatingEndpointsWatcher struct { watcher EndpointsWatcher } -func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, done DoneNotifier) { +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) { e := data.(*EndpointsResourceData) - d.watcher.OnUpdate(e, done) + d.watcher.OnUpdate(e) } -func (d *delegatingEndpointsWatcher) OnError(err error, done DoneNotifier) { - d.watcher.OnError(err, done) +func (d *delegatingEndpointsWatcher) OnError(err error) { + d.watcher.OnError(err) } -func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(done DoneNotifier) { - d.watcher.OnResourceDoesNotExist(done) +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() } // WatchEndpoints uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 419161e69a83..4337e4e063f7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -60,12 +60,12 @@ func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { return nil } if sc.IdentityInstanceName != "" { - if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok { return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) } } if sc.RootInstanceName != "" { - if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok { return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) } } @@ -144,10 +144,7 @@ func (l *ListenerResourceData) Raw() *anypb.Any { // events corresponding to the listener resource being watched. type ListenerWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. 
- OnUpdate(*ListenerResourceData, DoneNotifier) + OnUpdate(*ListenerResourceData) // OnError is invoked under different error conditions including but not // limited to the following: @@ -157,34 +154,28 @@ type ListenerWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnError(error, DoneNotifier) + OnError(error) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnResourceDoesNotExist(DoneNotifier) + OnResourceDoesNotExist() } type delegatingListenerWatcher struct { watcher ListenerWatcher } -func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, done DoneNotifier) { +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) { l := data.(*ListenerResourceData) - d.watcher.OnUpdate(l, done) + d.watcher.OnUpdate(l) } -func (d *delegatingListenerWatcher) OnError(err error, done DoneNotifier) { - d.watcher.OnError(err, done) +func (d *delegatingListenerWatcher) OnError(err error) { + d.watcher.OnError(err) } -func (d *delegatingListenerWatcher) OnResourceDoesNotExist(done DoneNotifier) { - d.watcher.OnResourceDoesNotExist(done) +func (d *delegatingListenerWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() } // WatchListener uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index d4377ed50496..3b3a8e79c2b9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -52,29 +52,13 @@ type Producer interface { WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) } -// DoneNotifier wraps the OnDone callback to be invoked once a resource update -// is processed by the watcher. -type DoneNotifier interface { - OnDone() -} - -// NopDoneNotifier is a concrete implementation of the DoneNotifier interface, -// that serves as a convenient placeholder when the callback is not needed. -type NopDoneNotifier struct{} - -// OnDone implements the DoneNotifier interface. -func (NopDoneNotifier) OnDone() {} - // ResourceWatcher wraps the callbacks to be invoked for different events // corresponding to the resource being watched. type ResourceWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. // The ResourceData parameter needs to be type asserted to the appropriate // type for the resource being watched. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnUpdate(ResourceData, DoneNotifier) + OnUpdate(ResourceData) // OnError is invoked under different error conditions including but not // limited to the following: @@ -84,11 +68,11 @@ type ResourceWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error, DoneNotifier) + OnError(error) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
- OnResourceDoesNotExist(DoneNotifier) + OnResourceDoesNotExist() } // TODO: Once the implementation is complete, rename this interface as diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index cd8b86d81b37..8ce5cb28596e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -108,10 +108,7 @@ func (r *RouteConfigResourceData) Raw() *anypb.Any { // events corresponding to the route configuration resource being watched. type RouteConfigWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnUpdate(*RouteConfigResourceData, DoneNotifier) + OnUpdate(*RouteConfigResourceData) // OnError is invoked under different error conditions including but not // limited to the following: @@ -121,34 +118,28 @@ type RouteConfigWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnError(error, DoneNotifier) + OnError(error) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - // - // The watcher is expected to call Done() on the DoneNotifier once it has - // processed the update. - OnResourceDoesNotExist(DoneNotifier) + OnResourceDoesNotExist() } type delegatingRouteConfigWatcher struct { watcher RouteConfigWatcher } -func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, done DoneNotifier) { +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) { rc := data.(*RouteConfigResourceData) - d.watcher.OnUpdate(rc, done) + d.watcher.OnUpdate(rc) } -func (d *delegatingRouteConfigWatcher) OnError(err error, done DoneNotifier) { - d.watcher.OnError(err, done) +func (d *delegatingRouteConfigWatcher) OnError(err error) { + d.watcher.OnError(err) } -func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(done DoneNotifier) { - d.watcher.OnResourceDoesNotExist(done) +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() } // WatchRouteConfig uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 1d649ac55180..8ede639abee6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -278,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { // the received Cluster resource. 
func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { - return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) + return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. The any proto @@ -477,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC case len(validationCtx.GetVerifyCertificateHash()) != 0: return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): - return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) + return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) case validationCtx.GetCrl() != nil: return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) case validationCtx.GetCustomValidatorConfig() != nil: diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go index 1fea8c830936..126aff067c4c 100644 --- a/vendor/google.golang.org/grpc/xds/server.go +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -43,8 +43,8 @@ const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { - return xdsclient.New(name) + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return xdsclient.New() } newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) @@ -95,14 +95,11 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { newXDSClient := newXDSClient if s.opts.bootstrapContentsForTesting != nil { // Bootstrap file contents may be specified as a server option for tests. - newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { - return xdsclient.NewForTesting(xdsclient.OptionsForTesting{ - Name: name, - Contents: s.opts.bootstrapContentsForTesting, - }) + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: s.opts.bootstrapContentsForTesting}) } } - xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer) + xdsClient, xdsClientClose, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xDS client creation failed: %v", err) } @@ -111,7 +108,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { // Listener resource name template is mandatory on the server side. cfg := xdsClient.BootstrapConfig() - if cfg.ServerListenerResourceNameTemplate() == "" { + if cfg.ServerListenerResourceNameTemplate == "" { xdsClientClose() return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") } @@ -194,7 +191,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // string, it will be replaced with the server's listening "IP:port" (e.g., // "0.0.0.0:8080", "[::]:8080"). 
cfg := s.xdsC.BootstrapConfig() - name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String()) + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) // Create a listenerWrapper which handles all functionality required by // this particular instance of Serve(). diff --git a/vendor/modules.txt b/vendor/modules.txt index b5df1af5a9ea..68bf8a0c86ce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -985,7 +985,7 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20240829123714-e455adb7444a +# github.com/grafana/dskit v0.0.0-20240905221822-931a021fb06b ## explicit; go 1.21 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff @@ -1586,7 +1586,7 @@ github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/thanos-io/objstore v0.0.0-20240828153123-de861b433240 +# github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 ## explicit; go 1.21 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -1979,7 +1979,7 @@ google.golang.org/genproto/googleapis/api/monitoredres google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.66.0 +# google.golang.org/grpc v1.65.0 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes @@ -2021,9 +2021,8 @@ google.golang.org/grpc/credentials/tls/certprovider/pemfile google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto -google.golang.org/grpc/experimental/stats +google.golang.org/grpc/experimental google.golang.org/grpc/grpclog -google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -2067,7 +2066,6 @@ google.golang.org/grpc/internal/xds/bootstrap/tlscreds google.golang.org/grpc/internal/xds/matcher google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive -google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/orca google.golang.org/grpc/orca/internal @@ -2105,10 +2103,8 @@ google.golang.org/grpc/xds/internal/resolver google.golang.org/grpc/xds/internal/resolver/internal google.golang.org/grpc/xds/internal/server google.golang.org/grpc/xds/internal/xdsclient -google.golang.org/grpc/xds/internal/xdsclient/internal google.golang.org/grpc/xds/internal/xdsclient/load google.golang.org/grpc/xds/internal/xdsclient/transport -google.golang.org/grpc/xds/internal/xdsclient/transport/internal google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter google.golang.org/grpc/xds/internal/xdsclient/xdsresource
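
Note on the xdsclient changes above: the reverted grpc version replaces the name-keyed client map with a process-wide, reference-counted singleton (see vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go, which pairs an atomic refCount with a grpcsync.OnceFunc close callback). The following is a minimal, self-contained sketch of that ref-counting pattern under assumed, hypothetical names; it is not the actual gRPC implementation.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refCounted wraps a resource that is created once, shared by all callers,
// and torn down only after the last caller releases it.
type refCounted struct {
	refs int32 // accessed atomically
}

var (
	mu        sync.Mutex
	singleton *refCounted
)

// acquire returns the shared instance (creating it on first use) and a
// release function. Like grpcsync.OnceFunc in the diff above, the release
// function is safe to call more than once; only the first call counts.
func acquire() (*refCounted, func()) {
	mu.Lock()
	defer mu.Unlock()

	if singleton == nil {
		singleton = &refCounted{}
	}
	atomic.AddInt32(&singleton.refs, 1)

	var once sync.Once
	release := func() {
		once.Do(func() {
			mu.Lock()
			defer mu.Unlock()
			if atomic.AddInt32(&singleton.refs, -1) == 0 {
				// Last reference dropped: tear down the shared resource.
				singleton = nil
			}
		})
	}
	return singleton, release
}

func main() {
	c1, release1 := acquire()
	c2, release2 := acquire()
	fmt.Println(c1 == c2) // true: both callers share the same instance
	release1()
	release1() // no-op: release is idempotent
	release2() // only now is the singleton discarded
}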