From 1ae4f6ec464c0f0da2bba786da5cc95237a3d49b Mon Sep 17 00:00:00 2001 From: Heba Elayoty <31887807+helayoty@users.noreply.github.com> Date: Mon, 30 Oct 2023 17:23:51 -0700 Subject: [PATCH] Revert "chore: bump google.golang.org/grpc from 1.53.0 to 1.56.3 (#47)" This reverts commit 448a31e7d500a4655d5bd1f7e0651763d7d001e4. --- go.mod | 10 +- go.sum | 14 +- .../pkg/apis/settings/settings.go | 2 +- .../pkg/controllers/controllers.go | 33 +- .../machine/disruption/controller.go | 21 +- .../machine/garbagecollection/controller.go | 201 +++++- .../machine/lifecycle/controller.go | 1 + .../machine/lifecycle/initialization.go | 2 +- .../controllers/machine/lifecycle/launch.go | 4 +- .../machine/lifecycle/registration.go | 4 + .../machine/termination/controller.go | 1 + .../metrics/provisioner/controller.go | 11 +- .../pkg/controllers/state/cluster.go | 22 +- .../pkg/utils/machine/machine.go | 19 +- vendor/golang.org/x/oauth2/README.md | 12 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/token.go | 14 +- .../googleapis/api/httpbody/httpbody.pb.go | 4 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 25 +- .../grpc/attributes/attributes.go | 29 - .../grpc/balancer/balancer.go | 2 +- .../grpc/balancer_conn_wrappers.go | 486 ++++++------- .../grpc_binarylog_v1/binarylog.pb.go | 4 +- vendor/google.golang.org/grpc/call.go | 5 - vendor/google.golang.org/grpc/clientconn.go | 645 ++++++------------ .../grpc/codes/code_string.go | 51 +- vendor/google.golang.org/grpc/dialoptions.go | 47 +- .../grpc/health/grpc_health_v1/health.pb.go | 4 +- .../health/grpc_health_v1/health_grpc.pb.go | 15 +- vendor/google.golang.org/grpc/idle.go | 287 -------- .../grpc/internal/binarylog/binarylog.go | 11 +- .../grpc/internal/binarylog/method_logger.go | 14 +- .../grpc/internal/buffer/unbounded.go | 26 +- .../grpc/internal/envconfig/envconfig.go | 4 - .../grpc/internal/envconfig/observability.go | 6 - .../grpc/internal/envconfig/xds.go | 21 +- .../grpc/internal/grpclog/prefixLogger.go | 12 - .../grpc/internal/grpcrand/grpcrand.go | 14 - .../internal/grpcsync/callback_serializer.go | 119 ---- .../grpc/internal/internal.go | 34 - .../grpc/internal/metadata/metadata.go | 62 +- .../grpc/internal/serviceconfig/duration.go | 130 ---- .../grpc/internal/transport/controlbuf.go | 93 ++- .../grpc/internal/transport/handler_server.go | 11 +- .../grpc/internal/transport/http2_client.go | 46 +- .../grpc/internal/transport/http2_server.go | 99 ++- .../grpc/internal/transport/http_util.go | 26 +- .../grpc/internal/transport/logging.go | 40 -- .../grpc/internal/transport/transport.go | 25 +- .../grpc/metadata/metadata.go | 13 +- .../google.golang.org/grpc/picker_wrapper.go | 38 +- vendor/google.golang.org/grpc/pickfirst.go | 52 +- .../grpc/resolver/resolver.go | 36 +- .../grpc/resolver_conn_wrapper.go | 229 +++---- vendor/google.golang.org/grpc/rpc_util.go | 53 +- vendor/google.golang.org/grpc/server.go | 250 ++----- .../google.golang.org/grpc/service_config.go | 75 +- vendor/google.golang.org/grpc/stats/stats.go | 22 +- .../google.golang.org/grpc/status/status.go | 53 +- vendor/google.golang.org/grpc/stream.go | 90 +-- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 15 +- vendor/modules.txt | 6 +- 63 files changed, 1228 insertions(+), 2507 deletions(-) delete mode 100644 vendor/google.golang.org/grpc/idle.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go delete mode 100644 
vendor/google.golang.org/grpc/internal/serviceconfig/duration.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go diff --git a/go.mod b/go.mod index e194d4e9..dbab70d2 100644 --- a/go.mod +++ b/go.mod @@ -20,10 +20,10 @@ require ( github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.10 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/pkg/errors v0.9.1 github.com/samber/lo v1.38.1 github.com/stretchr/testify v1.8.4 go.uber.org/multierr v1.11.0 + golang.org/x/crypto v0.12.0 k8s.io/api v0.25.4 k8s.io/apimachinery v0.25.4 k8s.io/client-go v0.25.4 @@ -91,6 +91,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.15.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect @@ -102,10 +103,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/automaxprocs v1.4.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.12.0 // indirect golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect golang.org/x/net v0.14.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.11.0 // indirect golang.org/x/term v0.11.0 // indirect @@ -115,8 +115,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/api v0.61.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/grpc v1.56.3 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.53.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 9676964a..ce1509a8 100644 --- a/go.sum +++ b/go.sum @@ -200,7 +200,7 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -555,8 +555,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= 
-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -816,8 +816,8 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -843,8 +843,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/github.com/aws/karpenter-core/pkg/apis/settings/settings.go b/vendor/github.com/aws/karpenter-core/pkg/apis/settings/settings.go index ecc2da88..54b6260e 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/apis/settings/settings.go +++ b/vendor/github.com/aws/karpenter-core/pkg/apis/settings/settings.go @@ -44,7 +44,7 @@ type Settings struct { } func (*Settings) ConfigMap() string { - return "karpenter-global-settings" + return "gpu-provisioner-global-settings" } // Inject creates a Settings from the supplied ConfigMap diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/controllers.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/controllers.go index 7bc9b534..3ea2880e 100644 --- 
a/vendor/github.com/aws/karpenter-core/pkg/controllers/controllers.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/controllers.go @@ -23,18 +23,11 @@ import ( "github.com/aws/karpenter-core/pkg/cloudprovider" "github.com/aws/karpenter-core/pkg/controllers/consistency" - "github.com/aws/karpenter-core/pkg/controllers/counter" - "github.com/aws/karpenter-core/pkg/controllers/deprovisioning" machinedisruption "github.com/aws/karpenter-core/pkg/controllers/machine/disruption" machinegarbagecollection "github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection" machinelifecycle "github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle" machinetermination "github.com/aws/karpenter-core/pkg/controllers/machine/termination" - metricspod "github.com/aws/karpenter-core/pkg/controllers/metrics/pod" - metricsprovisioner "github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner" - metricsstate "github.com/aws/karpenter-core/pkg/controllers/metrics/state" - "github.com/aws/karpenter-core/pkg/controllers/provisioning" "github.com/aws/karpenter-core/pkg/controllers/state" - "github.com/aws/karpenter-core/pkg/controllers/state/informer" "github.com/aws/karpenter-core/pkg/controllers/termination" "github.com/aws/karpenter-core/pkg/controllers/termination/terminator" "github.com/aws/karpenter-core/pkg/events" @@ -51,23 +44,23 @@ func NewControllers( cloudProvider cloudprovider.CloudProvider, ) []controller.Controller { - provisioner := provisioning.NewProvisioner(kubeClient, kubernetesInterface.CoreV1(), recorder, cloudProvider, cluster) + //provisioner := provisioning.NewProvisioner(kubeClient, kubernetesInterface.CoreV1(), recorder, cloudProvider, cluster) terminator := terminator.NewTerminator(clock, kubeClient, terminator.NewEvictionQueue(ctx, kubernetesInterface.CoreV1(), recorder)) return []controller.Controller{ - provisioner, - metricsstate.NewController(cluster), - deprovisioning.NewController(clock, kubeClient, provisioner, cloudProvider, recorder, cluster), - provisioning.NewController(kubeClient, provisioner, recorder), - informer.NewDaemonSetController(kubeClient, cluster), - informer.NewNodeController(kubeClient, cluster), - informer.NewPodController(kubeClient, cluster), - informer.NewProvisionerController(kubeClient, cluster), - informer.NewMachineController(kubeClient, cluster), + //provisioner, + //metricsstate.NewController(cluster), + //deprovisioning.NewController(clock, kubeClient, provisioner, cloudProvider, recorder, cluster), + //provisioning.NewController(kubeClient, provisioner, recorder), + //informer.NewDaemonSetController(kubeClient, cluster), + //informer.NewNodeController(kubeClient, cluster), + //informer.NewPodController(kubeClient, cluster), + //informer.NewProvisionerController(kubeClient, cluster), + //informer.NewMachineController(kubeClient, cluster), termination.NewController(kubeClient, cloudProvider, terminator, recorder), - metricspod.NewController(kubeClient), - metricsprovisioner.NewController(kubeClient), - counter.NewController(kubeClient, cluster), + //metricspod.NewController(kubeClient), + //metricsprovisioner.NewController(kubeClient), + //counter.NewController(kubeClient, cluster), consistency.NewController(clock, kubeClient, recorder, cloudProvider), machinelifecycle.NewController(clock, kubeClient, cloudProvider, recorder), machinegarbagecollection.NewController(clock, kubeClient, cloudProvider), diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/disruption/controller.go 
b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/disruption/controller.go index eb92d722..5221e657 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/disruption/controller.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/disruption/controller.go @@ -21,7 +21,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/clock" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -39,6 +38,8 @@ import ( corecontroller "github.com/aws/karpenter-core/pkg/operator/controller" machineutil "github.com/aws/karpenter-core/pkg/utils/machine" "github.com/aws/karpenter-core/pkg/utils/result" + + "github.com/azure/gpu-provisioner/pkg/staticprovisioner" ) type machineReconciler interface { @@ -80,10 +81,11 @@ func (c *Controller) Reconcile(ctx context.Context, machine *v1alpha5.Machine) ( if !machine.DeletionTimestamp.IsZero() { return reconcile.Result{}, nil } - provisioner := &v1alpha5.Provisioner{} - if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: machine.Labels[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } + // provisioner := &v1alpha5.Provisioner{} + // if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: machine.Labels[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { + // return reconcile.Result{}, client.IgnoreNotFound(err) + // } + provisioner := staticprovisioner.Sp var results []reconcile.Result var errs error @@ -134,11 +136,12 @@ func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontrol }, ), )). + WithEventFilter(machineutil.KaitoMachinePredicate). WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Watches( - &source.Kind{Type: &v1alpha5.Provisioner{}}, - machineutil.ProvisionerEventHandler(ctx, c.kubeClient), - ). + // Watches( + // &source.Kind{Type: &v1alpha5.Provisioner{}}, + // machineutil.ProvisionerEventHandler(ctx, c.kubeClient), + // ). 
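+		// gpu-provisioner: the Provisioner watch above is intentionally disabled; provisioner settings come from the in-process staticprovisioner.Sp, and KaitoMachinePredicate limits reconciliation to machines carrying the kaito.sh/workspace label, so Provisioner CRD events are no longer relevant here.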
Watches( &source.Kind{Type: &v1.Node{}}, machineutil.NodeEventHandler(ctx, c.kubeClient), diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection/controller.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection/controller.go index e966c4b6..2954dcca 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection/controller.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection/controller.go @@ -16,15 +16,21 @@ package garbagecollection import ( "context" + "sync/atomic" "time" "github.com/prometheus/client_golang/prometheus" "github.com/samber/lo" "go.uber.org/multierr" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" "k8s.io/utils/clock" "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -35,6 +41,10 @@ import ( "github.com/aws/karpenter-core/pkg/utils/sets" ) +const ( + NodeHeartBeatAnnotationKey = "NodeHeartBeatTimeStamp" +) + type Controller struct { clock clock.Clock kubeClient client.Client @@ -54,13 +64,116 @@ func (c *Controller) Name() string { } func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { + merr := c.reconcileMachines(ctx) + nerr := c.reconcileNodes(ctx) + + return reconcile.Result{RequeueAfter: time.Minute * 2}, multierr.Combine(merr, nerr) +} + +// gpu-provisioner: a node name conflict means two machines are linked to the same node. +// AKS APIs only support PUT, not POST, for creating a new agent pool. A name conflict during +// creation will not return an error. The latest create call will update the agent pool unconditionally. +// We cannot completely avoid the conflict. +// But since we use the machine name as the agent pool name, the chance of hitting an agent pool name conflict is very low. +// We rely on periodic checks to remediate the conflict if any. +func (c *Controller) remediateNodeNameConflict(ctx context.Context) error { + machineList := &v1alpha5.MachineList{} + if err := c.kubeClient.List(ctx, machineList, client.HasLabels([]string{"kaito.sh/workspace"})); err != nil { + return err + } + + nodeToMachineMap := make(map[string][]*v1alpha5.Machine) + + for i := range machineList.Items { + if node, linked := machineList.Items[i].Annotations[v1alpha5.MachineLinkedAnnotationKey]; linked { + if _, ok := nodeToMachineMap[node]; !ok { + nodeToMachineMap[node] = []*v1alpha5.Machine{&machineList.Items[i]} + } else { + nodeToMachineMap[node] = append(nodeToMachineMap[node], &machineList.Items[i]) + } + } + } + + for _, v := range nodeToMachineMap { + // Detect node name conflicts; the remediation is to remove the extra machines. + if len(v) > 1 { + // There is no particular order to decide which ones should be deleted.
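+			// Keep v[0] and strip the duplicates: clear the linked-node annotation and the provider ID first (spec and status require separate patches), remove the termination finalizer, and only then delete the machine, so deleting a duplicate cannot tear down the node that v[0] still references.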
+ for i := 1; i < len(v); i++ { + stored := v[i].DeepCopy() + updated := v[i].DeepCopy() + updated.Annotations[v1alpha5.MachineLinkedAnnotationKey] = "" + updated.Status.ProviderID = "" + controllerutil.RemoveFinalizer(updated, v1alpha5.TerminationFinalizer) + + updatedCopy := updated.DeepCopy() + if err := c.kubeClient.Patch(ctx, updated, client.MergeFrom(stored)); client.IgnoreNotFound(err) != nil { + return err + } + if err := c.kubeClient.Status().Patch(ctx, updatedCopy, client.MergeFrom(stored)); client.IgnoreNotFound(err) != nil { + return err + } + // After clearing the provider ID, it is safe to delete the machine. + if err := c.kubeClient.Delete(ctx, stored); client.IgnoreNotFound(err) != nil { + return err + } + logging.FromContext(ctx).Debugf("deleting machine %s because it has a node name conflict with machine %s", v[i].Name, v[0].Name) + } + } + } + return nil +} + +func (c *Controller) reconcileNodes(ctx context.Context) error { + nodeList := &v1.NodeList{} + if err := c.kubeClient.List(ctx, nodeList); err != nil { + return err + } + + machineList := &v1alpha5.MachineList{} + if err := c.kubeClient.List(ctx, machineList, client.HasLabels([]string{"kaito.sh/workspace"})); err != nil { + return err + } + + machineNames := make(map[string]struct{}) + for _, each := range machineList.Items { + machineNames[each.Name] = struct{}{} + } + + deletedGarbageNodes := lo.Filter(lo.ToSlicePtr(nodeList.Items), func(n *v1.Node, _ int) bool { + _, ok := n.Labels[v1alpha5.ProvisionerNameLabelKey] + if ok && n.Spec.ProviderID != "" { + // We rely on a strong assumption here: the machine name is equal to the agent pool name. + machine := n.Labels["kubernetes.azure.com/agentpool"] + if _, ok := machineNames[machine]; !ok { + return true + } + } + return false + }) + + errs := make([]error, len(deletedGarbageNodes)) + workqueue.ParallelizeUntil(ctx, 20, len(deletedGarbageNodes), func(i int) { + // We delete nodes to trigger the node finalization and deletion flow. + if err := c.kubeClient.Delete(ctx, deletedGarbageNodes[i]); client.IgnoreNotFound(err) != nil { + errs[i] = err + return + } + logging.FromContext(ctx).Debugf("garbage collecting node %s since the linked machine does not exist", deletedGarbageNodes[i].Name) + }) + return multierr.Combine(errs...) +} + +// gpu-provisioner: leverage the two-minute periodic check to update the machine readiness heartbeat. +func (c *Controller) reconcileMachines(ctx context.Context) error { machineList := &v1alpha5.MachineList{} - if err := c.kubeClient.List(ctx, machineList); err != nil { - return reconcile.Result{}, err + if err := c.kubeClient.List(ctx, machineList, client.HasLabels([]string{"kaito.sh/workspace"})); err != nil { + return err } + + // The NotReady nodes are excluded from the list. cloudProviderMachines, err := c.cloudProvider.List(ctx) if err != nil { - return reconcile.Result{}, err + return err } cloudProviderMachines = lo.Filter(cloudProviderMachines, func(m *v1alpha5.Machine, _ int) bool { return m.DeletionTimestamp.IsZero()
- machines := lo.Filter(lo.ToSlicePtr(machineList.Items), func(m *v1alpha5.Machine, _ int) bool { + + deletedGarbageMachines := lo.Filter(lo.ToSlicePtr(machineList.Items), func(m *v1alpha5.Machine, _ int) bool { + // The assumption is that any cleanup work should be done in 10 minutes. + // The node gc will cover any problems caused by this force deletion. + return !m.DeletionTimestamp.IsZero() && metav1.Now().After((*m.DeletionTimestamp).Add(time.Minute*10)) }) + + rfErrs := c.batchDeleteMachines(ctx, deletedGarbageMachines, true, "to be deleted but blocked by finalizer for more than 10 minutes") + + // Check all machine heartbeats. + hbMachines := lo.Filter(lo.ToSlicePtr(machineList.Items), func(m *v1alpha5.Machine, _ int) bool { return m.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsTrue() && - m.DeletionTimestamp.IsZero() && - c.clock.Since(m.StatusConditions().GetCondition(v1alpha5.MachineLaunched).LastTransitionTime.Inner.Time) > time.Second*10 && - !cloudProviderProviderIDs.Has(m.Status.ProviderID) + m.DeletionTimestamp.IsZero() }) + var hbUpdated atomic.Uint64 + // One slot per machine, so the parallel workers below never append to a shared slice (avoids a data race). + deletedNotReadyMachines := make([]*v1alpha5.Machine, len(hbMachines)) + // Update machine heartbeat. + hbErrs := make([]error, len(hbMachines)) + workqueue.ParallelizeUntil(ctx, 20, len(hbMachines), func(i int) { + stored := hbMachines[i].DeepCopy() + updated := hbMachines[i].DeepCopy() + + if cloudProviderProviderIDs.Has(stored.Status.ProviderID) { + hbUpdated.Add(1) + if updated.Annotations == nil { + updated.Annotations = make(map[string]string) + } + + timeStr, _ := metav1.NewTime(time.Now()).MarshalJSON() + updated.Annotations[NodeHeartBeatAnnotationKey] = string(timeStr) + + // If the machine was not ready, it becomes ready after getting the heartbeat. + updated.StatusConditions().MarkTrue("Ready") + } else { + logging.FromContext(ctx).Debugf("machine %s did not receive a heartbeat", stored.Name) + updated.StatusConditions().MarkFalse("Ready", "NodeNotReady", "Node status is NotReady") + } + statusCopy := updated.DeepCopy() + updateCopy := updated.DeepCopy() + if err := c.kubeClient.Patch(ctx, updated, client.MergeFrom(stored)); err != nil { + hbErrs[i] = client.IgnoreNotFound(err) + return + } + if err := c.kubeClient.Status().Patch(ctx, statusCopy, client.MergeFrom(stored)); err != nil { + hbErrs[i] = client.IgnoreNotFound(err) + return + } + if !updateCopy.StatusConditions().IsHappy() && + c.clock.Since(updateCopy.StatusConditions().GetCondition("Ready").LastTransitionTime.Inner.Time) > time.Minute*10 { + deletedNotReadyMachines[i] = updateCopy + } + }) + if len(hbMachines) > 0 { + logging.FromContext(ctx).Debugf("updated heartbeat for %d out of %d machines", hbUpdated.Load(), len(hbMachines)) + } + errs := c.batchDeleteMachines(ctx, lo.Compact(deletedNotReadyMachines), false, "being NotReady for more than 10 minutes") + errs = append(errs, hbErrs...) + errs = append(errs, rfErrs...) + return multierr.Combine(errs...)
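+	// Note on the heartbeat encoding above: metav1.Time.MarshalJSON returns the RFC 3339 timestamp together with its JSON quotes, so the stored annotation value includes the surrounding quote characters; consumers should unquote (or json.Unmarshal) the value before parsing it.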
+} + +func (c *Controller) batchDeleteMachines(ctx context.Context, machines []*v1alpha5.Machine, removeFinalizer bool, msg string) []error { errs := make([]error, len(machines)) workqueue.ParallelizeUntil(ctx, 20, len(machines), func(i int) { - if err := c.kubeClient.Delete(ctx, machines[i]); err != nil { - errs[i] = client.IgnoreNotFound(err) + if removeFinalizer { + stored := machines[i].DeepCopy() + updated := machines[i].DeepCopy() + controllerutil.RemoveFinalizer(updated, v1alpha5.TerminationFinalizer) + if !equality.Semantic.DeepEqual(stored, updated) { + if err := c.kubeClient.Patch(ctx, updated, client.MergeFrom(stored)); client.IgnoreNotFound(err) != nil { + errs[i] = err + return + } + } + } else if err := c.kubeClient.Delete(ctx, machines[i]); client.IgnoreNotFound(err) != nil { + errs[i] = err return } logging.FromContext(ctx). With("provisioner", machines[i].Labels[v1alpha5.ProvisionerNameLabelKey], "machine", machines[i].Name, "provider-id", machines[i].Status.ProviderID). - Debugf("garbage collecting machine with no cloudprovider representation") + Debugf("garbage collecting machine with reason: %s", msg) metrics.MachinesTerminatedCounter.With(prometheus.Labels{ metrics.ReasonLabel: "garbage_collected", metrics.ProvisionerLabel: machines[i].Labels[v1alpha5.ProvisionerNameLabelKey], }).Inc() }) - return reconcile.Result{RequeueAfter: time.Minute * 2}, multierr.Combine(errs...) + return errs } func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder { diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/controller.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/controller.go index 3778e6ac..3dc56dc7 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/controller.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/controller.go @@ -135,6 +135,7 @@ func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontrol DeleteFunc: func(e event.DeleteEvent) bool { return false }, }, )). + WithEventFilter(machineutil.KaitoMachinePredicate). Watches( &source.Kind{Type: &v1.Node{}}, machineutil.NodeEventHandler(ctx, c.kubeClient), diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/initialization.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/initialization.go index 1018bb28..ccc2b428 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/initialization.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/initialization.go @@ -129,7 +129,7 @@ func RequestedResourcesRegistered(node *v1.Node, machine *v1alpha5.Machine) (v1. // annotation says the resource should be there, but it's zero'd in both then the device plugin hasn't // registered it yet. 
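+ // gpu-provisioner: the check below exempts v1.ResourceStorage, so a zero or unreported allocatable storage value no longer blocks machine initialization.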
// We wait on allocatable since this is the value that is used in scheduling - if resources.IsZero(node.Status.Allocatable[resourceName]) { + if resourceName != v1.ResourceStorage && resources.IsZero(node.Status.Allocatable[resourceName]) { return resourceName, false } } diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/launch.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/launch.go index 3487d0e0..2d33a949 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/launch.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/launch.go @@ -152,8 +152,8 @@ func PopulateMachineDetails(machine, retrieved *v1alpha5.Machine) { } func truncateMessage(msg string) string { - if len(msg) < 300 { + if len(msg) < 800 { return msg } - return msg[:300] + "..." + return msg[:800] + "..." } diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/registration.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/registration.go index 2cf6dd7b..f1585085 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/registration.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle/registration.go @@ -70,6 +70,10 @@ func (r *Registration) Reconcile(ctx context.Context, machine *v1alpha5.Machine) logging.FromContext(ctx).Debugf("registered machine") machine.StatusConditions().MarkTrue(v1alpha5.MachineRegistered) machine.Status.NodeName = node.Name + // gpu-provisioner: update allocatable and capacity from the node as well + machine.Status.Allocatable = node.Status.Allocatable + machine.Status.Capacity = node.Status.Capacity + metrics.MachinesRegisteredCounter.With(prometheus.Labels{ metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey], }).Inc() diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/termination/controller.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/termination/controller.go index efd32c27..178a4985 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/termination/controller.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/machine/termination/controller.go @@ -106,6 +106,7 @@ func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontrol NewControllerManagedBy(m). For(&v1alpha5.Machine{}). WithEventFilter(predicate.GenerationChangedPredicate{}). + WithEventFilter(machineutil.KaitoMachinePredicate).
Watches( &source.Kind{Type: &v1.Node{}}, machineutil.NodeEventHandler(ctx, c.kubeClient), diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner/controller.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner/controller.go index 8d8c4656..014a01c2 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner/controller.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner/controller.go @@ -35,6 +35,8 @@ import ( "github.com/aws/karpenter-core/pkg/apis/v1alpha5" corecontroller "github.com/aws/karpenter-core/pkg/operator/controller" + + "github.com/azure/gpu-provisioner/pkg/staticprovisioner" ) const ( @@ -112,10 +114,11 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reco c.cleanup(req.NamespacedName) // Retrieve provisioner from reconcile request - provisioner := &v1alpha5.Provisioner{} - if err := c.kubeClient.Get(ctx, req.NamespacedName, provisioner); err != nil { - return reconcile.Result{}, client.IgnoreNotFound(err) - } + // provisioner := &v1alpha5.Provisioner{} + // if err := c.kubeClient.Get(ctx, req.NamespacedName, provisioner); err != nil { + // return reconcile.Result{}, client.IgnoreNotFound(err) + // } + provisioner := staticprovisioner.Sp c.record(ctx, provisioner) // periodically update our metrics per provisioner even if nothing has changed diff --git a/vendor/github.com/aws/karpenter-core/pkg/controllers/state/cluster.go b/vendor/github.com/aws/karpenter-core/pkg/controllers/state/cluster.go index c1c26b96..b7951f26 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/controllers/state/cluster.go +++ b/vendor/github.com/aws/karpenter-core/pkg/controllers/state/cluster.go @@ -41,6 +41,8 @@ import ( "github.com/aws/karpenter-core/pkg/scheduling" podutils "github.com/aws/karpenter-core/pkg/utils/pod" "github.com/aws/karpenter-core/pkg/utils/sets" + + "github.com/azure/gpu-provisioner/pkg/staticprovisioner" ) // Cluster maintains cluster state that is often needed but expensive to compute.
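The staticprovisioner package referenced in these hunks is not defined anywhere in this patch. Inferred purely from its usage (a package-level *v1alpha5.Provisioner named Sp, whose Spec supplies StartupTaints and is passed to GetInstanceTypes), a minimal sketch could look like the following; the provisioner name and the empty spec are assumptions, not the repository's actual definition:

// Package staticprovisioner is a hypothetical reconstruction from usage in
// this patch; the real implementation in github.com/azure/gpu-provisioner may differ.
package staticprovisioner

import (
	"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Sp is the single in-memory Provisioner used in place of Provisioner CRD
// reads from the API server.
var Sp = &v1alpha5.Provisioner{
	ObjectMeta: metav1.ObjectMeta{Name: "default"},
	Spec:       v1alpha5.ProvisionerSpec{},
}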
@@ -399,7 +401,7 @@ func (c *Cluster) newStateFromNode(ctx context.Context, node *v1.Node, oldNode * } if err := multierr.Combine( c.populateStartupTaints(ctx, n), - c.populateInflight(ctx, n), + // c.populateInflight(ctx, n), // gpu-provisioner does not need to calculate inflight capacity c.populateResourceRequests(ctx, n), c.populateVolumeLimits(ctx, n), ); err != nil { @@ -431,10 +433,11 @@ func (c *Cluster) populateStartupTaints(ctx context.Context, n *StateNode) error if !n.Owned() { return nil } - provisioner := &v1alpha5.Provisioner{} - if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { - return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err)) - } + // provisioner := &v1alpha5.Provisioner{} + // if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { + // return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err)) + // } + provisioner := staticprovisioner.Sp n.startupTaints = provisioner.Spec.StartupTaints return nil } @@ -443,10 +446,11 @@ func (c *Cluster) populateInflight(ctx context.Context, n *StateNode) error { if !n.Owned() { return nil } - provisioner := &v1alpha5.Provisioner{} - if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { - return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err)) - } + // provisioner := &v1alpha5.Provisioner{} + // if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil { + // return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err)) + // } + provisioner := staticprovisioner.Sp instanceTypes, err := c.cloudProvider.GetInstanceTypes(ctx, provisioner) if err != nil { return err diff --git a/vendor/github.com/aws/karpenter-core/pkg/utils/machine/machine.go b/vendor/github.com/aws/karpenter-core/pkg/utils/machine/machine.go index 926296d0..1ed83b6a 100644 --- a/vendor/github.com/aws/karpenter-core/pkg/utils/machine/machine.go +++ b/vendor/github.com/aws/karpenter-core/pkg/utils/machine/machine.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "strings" "time" "github.com/samber/lo" @@ -28,12 +29,19 @@ import ( "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/aws/karpenter-core/pkg/apis/v1alpha5" "github.com/aws/karpenter-core/pkg/scheduling" ) +var KaitoMachinePredicate, _ = predicate.LabelSelectorPredicate(metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + metav1.LabelSelectorRequirement{Key: "kaito.sh/workspace", Operator: "Exists"}, + }, +}) + // EventHandler is a watcher on v1alpha5.Machine that maps Machines to Nodes based on provider ids // and enqueues reconcile.Requests for the Nodes func EventHandler(ctx context.Context, c client.Client) handler.EventHandler { @@ -183,7 +191,16 @@ func AllNodesForMachine(ctx context.Context, c client.Client, machine *v1alpha5. 
providerID := lo.Ternary(machine.Status.ProviderID != "", machine.Status.ProviderID, machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]) // Machines that have no resolved providerID have no nodes mapped to them if providerID == "" { - return nil, nil + // Check for common failures caused by bad input. + msg := machine.StatusConditions().GetCondition(v1alpha5.MachineLaunched).GetMessage() + if machine.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsFalse() && + (msg == "all requested instance types were unavailable during launch" || + strings.Contains(msg, "is not allowed in your subscription in location")) { + return nil, nil // Not recoverable; not considered an error + } else { + return nil, fmt.Errorf("machine has not been associated with any node yet") + } } nodeList := v1.NodeList{} if err := c.List(ctx, &nodeList, client.MatchingFields{"spec.providerID": providerID}); err != nil { diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2..1473e129 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new endpoints +## Policy for new packages We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,12 +29,8 @@ package. ## Report Issues / Send Patches -The main issue tracker for the oauth2 repository is located at -https://github.com/golang/oauth2/issues. - This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://golang.org/doc/contribute.html. -* Excluding trivial changes, all contributions should be connected to an existing issue. -* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. -* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe..291df5c8 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,7 +16,6 @@ import ( "net/url" "strings" "sync" - "time" "golang.org/x/oauth2/internal" ) @@ -141,7 +140,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. +// the state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
// // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -291,8 +290,6 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token - - expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -308,7 +305,6 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } - t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -383,30 +379,3 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } - -// ReuseTokenSource returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is -// configurable. The expiration time of a token is calculated as -// t.Expiry.Add(-earlyExpiry). -func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly, but set the expiryDelta to earlyExpiry, - // so the behavior matches what the user expects. - rt.expiryDelta = earlyExpiry - return rt - } - src = rt.new - } - if t != nil { - t.expiryDelta = earlyExpiry - } - return &reuseTokenSource{ - t: t, - new: src, - expiryDelta: earlyExpiry, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 7c64006d..82272034 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// defaultExpiryDelta determines how earlier a token should be considered +// expiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const defaultExpiryDelta = 10 * time.Second +const expiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,11 +52,6 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} - - // expiryDelta is used to calculate when a token is considered - // expired, by subtracting from Expiry. If zero, defaultExpiryDelta - // is used. - expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -132,11 +127,6 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } - - expiryDelta := defaultExpiryDelta - if t.expiryDelta != 0 { - expiryDelta = t.expiryDelta - } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 3543268f..af72196c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.12.2 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1..52338d00 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,15 +20,6 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. These issues are well-documented and usually can be - resolved with a single pull request. - -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. - - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -41,18 +32,14 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca459..02f5dc53 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,11 +25,6 @@ // later release. package attributes -import ( - "fmt" - "strings" -) - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an @@ -104,27 +99,3 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } - -// String prints the attribute map. 
If any key or values throughout the map -// implement fmt.Stringer, it calls that method and appends. -func (a *Attributes) String() string { - var sb strings.Builder - sb.WriteString("{") - first := true - for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } - if !first { - sb.WriteString(", ") - } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) - first = false - } - sb.WriteString("}") - return sb.String() -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c..09d61dd1 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -286,7 +286,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metadata metadata.MD + Metatada metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 04b9ad41..0359956d 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,20 +25,14 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" -) - -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -55,101 +49,192 @@ const ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions - - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. + cc *ClientConn + + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. balancer *gracefulswitch.Balancer curBalancerName string - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. 
+ closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, + cc: cc, + updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), } + go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case u := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { + break + } + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) + case *scStateUpdate: + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) + } + case <-ccb.closed.Done(): + } + + if ccb.closed.HasFired() { + ccb.handleClose() + return + } + } +} + // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. 
- if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) +} + +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue } - ccs.ResolverState.Addresses = addrs + addrs = append(addrs, addr) } - errCh <- ccb.balancer.UpdateClientConnState(*ccs) - }) - if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + ccs.ResolverState.Addresses = addrs } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - return err + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.updateCh.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, }) - ccb.mu.Unlock() +} + +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.ResolverError(err) - }) - ccb.mu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { + ccb.balancer.ResolverError(err) } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -163,27 +248,24 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) - ccb.mu.Unlock() + ccb.updateCh.Put(&switchToUpdate{name: name}) } -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -199,114 +281,26 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } -func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. 
-func (ccb *ccBalancerWrapper) enterIdleMode() {
-	channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
-	ccb.closeBalancer(ccbModeIdle)
-}
-
-// closeBalancer is invoked when the channel is being closed or when it enters
-// idle mode upon expiry of idle_timeout.
-func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
-	ccb.mu.Lock()
-	if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
-		ccb.mu.Unlock()
-		return
-	}
-
-	ccb.mode = m
-	done := ccb.serializer.Done
-	b := ccb.balancer
-	ok := ccb.serializer.Schedule(func(_ context.Context) {
-		// Close the serializer to ensure that no more calls from gRPC are sent
-		// to the balancer.
-		ccb.serializerCancel()
-		// Empty the current balancer name because we don't have a balancer
-		// anymore and also so that we act on the next call to switchTo by
-		// creating a new balancer specified by the new resolver.
-		ccb.curBalancerName = ""
-	})
-	if !ok {
-		ccb.mu.Unlock()
-		return
-	}
-	ccb.mu.Unlock()
-
-	// Give enqueued callbacks a chance to finish.
-	<-done
-	// Spawn a goroutine to close the balancer (since it may block trying to
-	// cleanup all allocated resources) and return early.
-	go b.Close()
-}
-
-// exitIdleMode is invoked by grpc when the channel exits idle mode either
-// because of an RPC or because of an invocation of the Connect() API. This
-// recreates the balancer that was closed previously when entering idle mode.
+// handleRemoveSubConn handles a request from the underlying balancer to remove
+// a subConn.
 //
-// If the channel is not in idle mode, we know for a fact that we are here as a
-// result of the user calling the Connect() method on the ClientConn. In this
-// case, we can simply forward the call to the underlying balancer, instructing
-// it to reconnect to the backends.
-func (ccb *ccBalancerWrapper) exitIdleMode() {
-	ccb.mu.Lock()
-	if ccb.mode == ccbModeClosed {
-		// Request to exit idle is a no-op when wrapper is already closed.
-		ccb.mu.Unlock()
-		return
-	}
-
-	if ccb.mode == ccbModeIdle {
-		// Recreate the serializer which was closed when we entered idle.
-		ctx, cancel := context.WithCancel(context.Background())
-		ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
-		ccb.serializerCancel = cancel
-	}
-
-	// The ClientConn guarantees mutual exclusion between close() and
-	// exitIdleMode(), and since we just created a new serializer, we can be
-	// sure that the below function will be scheduled.
-	done := make(chan struct{})
-	ccb.serializer.Schedule(func(_ context.Context) {
-		defer close(done)
-
-		ccb.mu.Lock()
-		defer ccb.mu.Unlock()
-
-		if ccb.mode != ccbModeIdle {
-			ccb.balancer.ExitIdle()
-			return
-		}
-
-		// Gracefulswitch balancer does not support a switchTo operation after
-		// being closed. Hence we need to create a new one here.
-		ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
-		ccb.mode = ccbModeActive
-		channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
-
-	})
-	ccb.mu.Unlock()
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
 
-	<-done
+func (ccb *ccBalancerWrapper) close() {
+	ccb.closed.Fire()
+	<-ccb.done.Done()
 }
 
-func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
+func (ccb *ccBalancerWrapper) handleClose() {
+	ccb.balancer.Close()
+	ccb.done.Fire()
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if ccb.isIdleOrClosed() {
-		return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
-	}
-
-	if len(addrs) == 0 {
+	if len(addrs) <= 0 {
 		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
 	}
 	ac, err := ccb.cc.newAddrConn(addrs, opts)
@@ -315,35 +309,31 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 		return nil, err
 	}
 	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
+	acbw.ac.mu.Lock()
 	ac.acbw = acbw
+	acbw.ac.mu.Unlock()
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	if ccb.isIdleOrClosed() {
-		// It is safe to ignore this call when the balancer is closed or in idle
-		// because the ClientConn takes care of closing the connections.
-		//
-		// Not returning early from here when the balancer is closed or in idle
-		// leads to a deadlock though, because of the following sequence of
-		// calls when holding cc.mu:
-		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
-		// ccb.RemoveAddrConn --> cc.removeAddrConn
-		return
-	}
-
+	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+	// was required to handle the RemoveSubConn() method asynchronously by pushing
+	// the update onto the update channel. This was done to avoid a deadlock as
+	// switchBalancer() was holding cc.mu when calling Close() on the old
+	// balancer, which would in turn call RemoveSubConn().
+	//
+	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
	// asynchronously is probably not required anymore since the switchTo() method
+	// handles the balancer switch by pushing the update onto the channel.
+	// TODO(easwars): Handle this inline.
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
 	}
-	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+	ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
 }
 
 func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
-	if ccb.isIdleOrClosed() {
-		return
-	}
-
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
@@ -352,10 +342,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
 	}
 }
 
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
-	if ccb.isIdleOrClosed() {
-		return
-	}
-
 	// Update picker before updating state. Even though the ordering here does
 	// not matter, it can lead to multiple calls of Pick in the common start-up
 	// case where we wait for ready and then perform an RPC. If the picker is
@@ -366,10 +352,6 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
 }
 
 func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
-	if ccb.isIdleOrClosed() {
-		return
-	}
-
 	ccb.cc.resolveNow(o)
 }
 
@@ -380,31 +362,71 @@ func (ccb *ccBalancerWrapper) Target() string {
 
 // acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface. type acBalancerWrapper struct { - ac *addrConn // read-only - mu sync.Mutex + ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) -} - func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.ac.updateAddrs(addrs) + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + newAC, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() + if acState != connectivity.Idle { + go newAC.connect() + } + } } func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() go acbw.ac.connect() } +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, blocks until it is or ctx expires. Returns an error when the context -// expires or the addrConn is shut down. +// ready, returns errSubConnNotReady. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa1..66d141fc 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 +// protoc-gen-go v1.28.1 +// protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d..9e20e4d3 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -27,11 +27,6 @@ import ( // // All errors returned by Invoke are compatible with the status package. 
 func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
-	if err := cc.idlenessMgr.onCallBegin(); err != nil {
-		return err
-	}
-	defer cc.idlenessMgr.onCallEnd()
-
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
 	opts = combine(cc.dopts.callOptions, opts)
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 95a7459b..d607d4e9 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"math"
 	"net/url"
+	"reflect"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -68,9 +69,6 @@ var (
 	errConnDrain = errors.New("grpc: the connection is drained")
 	// errConnClosing indicates that the connection is closing.
 	errConnClosing = errors.New("grpc: the connection is closing")
-	// errConnIdling indicates that the connection is being closed as the channel
-	// is moving to an idle mode due to inactivity.
-	errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
 	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
 	// service config.
 	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -136,42 +134,20 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
 // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
 func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
-		target: target,
-		csMgr:  &connectivityStateManager{},
-		conns:  make(map[*addrConn]struct{}),
-		dopts:  defaultDialOptions(),
-		czData: new(channelzData),
-	}
-
-	// We start the channel off in idle mode, but kick it out of idle at the end
-	// of this method, instead of waiting for the first RPC. Other gRPC
-	// implementations do wait for the first RPC to kick the channel out of
-	// idle. But doing so would be a major behavior change for our users who are
-	// used to seeing the channel active after Dial.
-	//
-	// Taking this approach of kicking it out of idle at the end of this method
-	// allows us to share the code between channel creation and exiting idle
-	// mode. This will also make it easy for us to switch to starting the
-	// channel off in idle, if at all we ever get to do that.
-	cc.idlenessState = ccIdlenessStateIdle
-
+		target:            target,
+		csMgr:             &connectivityStateManager{},
+		conns:             make(map[*addrConn]struct{}),
+		dopts:             defaultDialOptions(),
+		blockingpicker:    newPickerWrapper(),
+		czData:            new(channelzData),
+		firstResolveEvent: grpcsync.NewEvent(),
+	}
 	cc.retryThrottler.Store((*retryThrottler)(nil))
 	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
 	cc.ctx, cc.cancel = context.WithCancel(context.Background())
-	cc.exitIdleCond = sync.NewCond(&cc.mu)
-
-	disableGlobalOpts := false
-	for _, opt := range opts {
-		if _, ok := opt.(*disableGlobalDialOptions); ok {
-			disableGlobalOpts = true
-			break
-		}
-	}
-	if !disableGlobalOpts {
-		for _, opt := range globalDialOptions {
-			opt.apply(&cc.dopts)
-		}
+	for _, opt := range extraDialOptions {
+		opt.apply(&cc.dopts)
 	}
 
 	for _, opt := range opts {
@@ -187,11 +163,40 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		}
 	}()
 
-	// Register ClientConn with channelz.
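Returning to the Invoke hunk at the top of this file's diff: the deleted onCallBegin/onCallEnd pair was the entire per-RPC surface of the idleness manager, a begin hook that may kick the channel out of idle (and may fail) and an end hook that timestamps activity for the idle timer. The shape of that instrumentation as a standalone sketch — callTracker is an illustrative stand-in, not a grpc-go type:

package main

import (
	"context"
	"fmt"
)

// callTracker is an illustrative stand-in for the idleness manager.
type callTracker interface {
	onCallBegin() error // may need to exit idle mode first; can fail
	onCallEnd()         // records activity so the idle timer can reset
}

// invoke brackets an RPC with the begin/end hooks, mirroring the lines
// removed from ClientConn.Invoke in this revert.
func invoke(ctx context.Context, t callTracker, rpc func(context.Context) error) error {
	if err := t.onCallBegin(); err != nil {
		return err // e.g. the channel failed to leave idle mode
	}
	defer t.onCallEnd()
	return rpc(ctx)
}

type noopTracker struct{}

func (noopTracker) onCallBegin() error { return nil }
func (noopTracker) onCallEnd()         {}

func main() {
	err := invoke(context.Background(), noopTracker{}, func(context.Context) error {
		fmt.Println("rpc body runs here")
		return nil
	})
	fmt.Println("err:", err)
}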
- cc.channelzRegistration(target) + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID - if err := cc.validateTransportCredentials(); err != nil { - return nil, err + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -229,19 +234,35 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + scSet = true + } + default: + } + } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { return nil, err } - if err = cc.determineAuthority(); err != nil { + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) + if err != nil { return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - if cc.dopts.scChan != nil { + if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: @@ -257,224 +278,57 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - - // A blocking dial blocks until the clientConn is ready. 
- for { - s := cc.GetState() - if s == connectivity.Idle { - cc.Connect() - } - if s == connectivity.Ready { - return cc, nil - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() - } - } -} - -// addTraceEvent is a helper method to add a trace event on the channel. If the -// channel is a nested one, the same event is also added on the parent channel. -func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel %s", msg), - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) -} - -// exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return errConnClosing - } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() - cc.mu.Unlock() - - // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. 
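The loop deleted above (and the equivalent one restored further down in this hunk) implements the WithBlock contract: DialContext polls connectivity states until the channel is Ready or the context expires. From the caller's side that contract looks like this — all real grpc-go APIs, with a placeholder target:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// WithBlock makes DialContext wait inside the state loop shown above
	// until the channel is Ready, instead of returning immediately and
	// letting the connection come up in the background.
	conn, err := grpc.DialContext(ctx, "dns:///example.internal:50051", // placeholder
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err) // typically context.DeadlineExceeded here
	}
	defer conn.Close()
}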
-	if err := cc.initResolverWrapper(credsClone); err != nil {
-		return err
-	}
+	cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
+		DialCreds:        credsClone,
+		CredsBundle:      cc.dopts.copts.CredsBundle,
+		Dialer:           cc.dopts.copts.Dialer,
+		Authority:        cc.authority,
+		CustomUserAgent:  cc.dopts.copts.UserAgent,
+		ChannelzParentID: cc.channelzID,
+		Target:           cc.parsedTarget,
+	})
 
-	if exitedIdle {
-		cc.addTraceEvent("exiting idle mode")
+	// Build the resolver.
+	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
-	return nil
-}
-
-// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
-// name resolver, load balancer and any subchannels.
-func (cc *ClientConn) enterIdleMode() error {
 	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return ErrClientConnClosing
-	}
-	if cc.idlenessState != ccIdlenessStateActive {
-		logger.Error("ClientConn asked to enter idle mode when not active")
-		return nil
-	}
-
-	// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
-	// of setting it to nil here, we recreate the map. This also means that we
-	// don't have to do this when exiting idle mode.
-	conns := cc.conns
-	cc.conns = make(map[*addrConn]struct{})
-
-	// TODO: Currently, we close the resolver wrapper upon entering idle mode
-	// and create a new one upon exiting idle mode. This means that the
-	// `cc.resolverWrapper` field would be overwritten every time we exit idle
-	// mode. While this means that we need to hold `cc.mu` when accessing
-	// `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
-	// try to do the same for the balancer and picker wrappers too.
-	cc.resolverWrapper.close()
-	cc.blockingpicker.enterIdleMode()
-	cc.balancerWrapper.enterIdleMode()
-	cc.csMgr.updateState(connectivity.Idle)
-	cc.idlenessState = ccIdlenessStateIdle
+	cc.resolverWrapper = rWrapper
 	cc.mu.Unlock()
 
-	go func() {
-		cc.addTraceEvent("entering idle mode")
-		for ac := range conns {
-			ac.tearDown(errConnIdling)
-		}
-	}()
-	return nil
-}
-
-// validateTransportCredentials performs a series of checks on the configured
-// transport credentials. It returns a non-nil error if any of these conditions
-// are met:
-// - no transport creds and no creds bundle is configured
-// - both transport creds and creds bundle are configured
-// - creds bundle is configured, but it lacks transport credentials
-// - insecure transport creds configured alongside call creds that require
-// transport level security
-//
-// If none of the above conditions are met, the configured credentials are
-// deemed valid and a nil error is returned.
-func (cc *ClientConn) validateTransportCredentials() error { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + cc.Connect() + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() } } } - return nil -} -// channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID + return cc, nil } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -620,9 +474,7 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -643,31 +495,11 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). 
-//
-// This state tracking is mostly for self-protection. The idlenessManager is
-// expected to keep track of the state as well, and is expected not to call into
-// the ClientConn unnecessarily.
-type ccIdlenessState int8
-
-const (
-	ccIdlenessStateActive ccIdlenessState = iota
-	ccIdlenessStateIdle
-	ccIdlenessStateExitingIdle
-)
-
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in former case and false in latter.
 //
@@ -707,10 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State {
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a later
 // release.
 func (cc *ClientConn) Connect() {
-	cc.exitIdleMode()
-	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
-	// LB policy so that connections can be created.
-	cc.balancerWrapper.exitIdleMode()
+	cc.balancerWrapper.exitIdle()
 }
 
 func (cc *ClientConn) scWatcher() {
@@ -879,7 +708,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
 		dopts:        cc.dopts,
 		czData:       new(channelzData),
 		resetBackoff: make(chan struct{}),
-		stateChan:    make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Track ac in cc. This needs to be done before any getTransport(...) is called.
@@ -973,6 +801,9 @@ func (ac *addrConn) connect() error {
 		ac.mu.Unlock()
 		return nil
 	}
+	// Update connectivity state within the lock to prevent subsequent or
+	// concurrent calls from resetting the transport more than once.
+	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
 	ac.resetTransport()
@@ -991,62 +822,58 @@ func equalAddresses(a, b []resolver.Address) bool {
 	return true
 }
 
-// updateAddrs updates ac.addrs with the new addresses list and handles active
-// connections or connection attempts.
-func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
+//
+// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
+// addresses will be picked up by retry in the next iteration after backoff.
+//
+// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
+//
+// If the addresses are the same as the old list, it does nothing and returns
+// true.
+//
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
+// If ac is Ready, it checks whether the current connected address of ac is in the
+// new addrs list.
+// - If true, it updates ac.addrs and returns true. The ac will keep using
+// the existing connection.
+// - If false, it does nothing and returns false.
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 	ac.mu.Lock()
-	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
-
-	if equalAddresses(ac.addrs, addrs) {
-		ac.mu.Unlock()
-		return
-	}
-
-	ac.addrs = addrs
-
+	defer ac.mu.Unlock()
+	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
 	if ac.state == connectivity.Shutdown ||
 		ac.state == connectivity.TransientFailure ||
 		ac.state == connectivity.Idle {
-		// We were not connecting, so do nothing but update the addresses.
-		ac.mu.Unlock()
-		return
+		ac.addrs = addrs
+		return true
 	}
 
-	if ac.state == connectivity.Ready {
-		// Try to find the connected address.
- for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { - // We are connected to a valid address, so do nothing but - // update the addresses. - ac.mu.Unlock() - return - } - } + if equalAddresses(ac.addrs, addrs) { + return true } - // We are either connected to the wrong address or currently connecting. - // Stop the current iteration and restart. - - ac.cancel() - ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. - if ac.transport != nil { - defer ac.transport.GracefulClose() - ac.transport = nil + if ac.state == connectivity.Connecting { + return false } - if len(addrs) == 0 { - ac.updateConnectivityState(connectivity.Idle, nil) + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs } - ac.mu.Unlock() - - // Since we were connecting/connected, we should start a new connection - // attempt. - go ac.resetTransport() + return curAddrFound } // getServerName determines the serverName to be used in the connection @@ -1199,40 +1026,39 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } - - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } + cc.blockingpicker.close() if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } - if idlenessMgr != nil { - idlenessMgr.close() - } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - cc.addTraceEvent("deleted") + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1262,8 +1088,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1277,15 +1102,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. 
-	close(ac.stateChan)
-	ac.stateChan = make(chan struct{})
 	ac.state = s
-	if lastErr == nil {
-		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
-	} else {
-		channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
-	}
+	channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
 	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
 }
 
@@ -1305,8 +1123,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 
 func (ac *addrConn) resetTransport() {
 	ac.mu.Lock()
-	acCtx := ac.ctx
-	if acCtx.Err() != nil {
+	if ac.state == connectivity.Shutdown {
 		ac.mu.Unlock()
 		return
 	}
@@ -1334,14 +1151,15 @@ func (ac *addrConn) resetTransport() {
 	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
-	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
 		ac.cc.resolveNow(resolver.ResolveNowOptions{})
 		// After exhausting all addresses, the addrConn enters
 		// TRANSIENT_FAILURE.
-		if acCtx.Err() != nil {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
 			return
 		}
-		ac.mu.Lock()
 		ac.updateConnectivityState(connectivity.TransientFailure, err)
 
 		// Backoff.
@@ -1356,13 +1174,13 @@ func (ac *addrConn) resetTransport() {
 			ac.mu.Unlock()
 		case <-b:
 			timer.Stop()
-		case <-acCtx.Done():
+		case <-ac.ctx.Done():
 			timer.Stop()
 			return
 		}
 
 		ac.mu.Lock()
-		if acCtx.Err() == nil {
+		if ac.state != connectivity.Shutdown {
 			ac.updateConnectivityState(connectivity.Idle, err)
 		}
 		ac.mu.Unlock()
@@ -1377,13 +1195,14 @@ func (ac *addrConn) resetTransport() {
 // tryAllAddrs tries to create a connection to the addresses, and stops at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
 	var firstConnErr error
 	for _, addr := range addrs {
-		if ctx.Err() != nil {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
 			return errConnClosing
 		}
-		ac.mu.Lock()
 
 		ac.cc.mu.RLock()
 		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@@ -1397,7 +1216,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
 
 		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
 
-		err := ac.createTransport(ctx, addr, copts, connectDeadline)
+		err := ac.createTransport(addr, copts, connectDeadline)
 		if err == nil {
 			return nil
 		}
@@ -1414,20 +1233,19 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
 // createTransport creates a connection to addr. It returns an error if the
 // address was not successfully connected, or updates ac appropriately with the
 // new transport.
-func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ctx) + hctx, hcancel := context.WithCancel(ac.ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ctx.Err() != nil { - // Already shut down or connection attempt canceled. tearDown() or - // updateAddrs() already cleared the transport and canceled hctx - // via ac.ctx, and we expected this connection to be closed, so do - // nothing here. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. return } hcancel() @@ -1446,7 +1264,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1463,7 +1281,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, ac.mu.Lock() defer ac.mu.Unlock() - if ctx.Err() != nil { + if ac.state == connectivity.Shutdown { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1475,9 +1293,6 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - // - // This can also happen when updateAddrs is called during a connection - // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1585,29 +1400,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1735,9 +1527,6 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") -// getResolver finds the scheme in the cc's resolvers or the global registry. -// scheme should always be lowercase (typically by virtue of url.Parse() -// performing proper RFC3986 behavior). 
func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1759,14 +1548,7 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. -// -// The resolver to use is determined based on the scheme in the parsed target -// and the same is stored in `cc.resolverBuilder`. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) parseTargetAndFindResolver() error { +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1778,8 +1560,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil + return rb, nil } } @@ -1794,16 +1575,15 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return err + return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - cc.resolverBuilder = rb - return nil + return rb, nil } // parseTarget uses RFC 3986 semantics to parse the given target into a @@ -1826,15 +1606,7 @@ func parseTarget(target string) (resolver.Target, error) { // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -// -// Stores the determined authority in `cc.authority`. -// -// Returns a non-nil error if the authority returned by the transport -// credentials do not match the authority configured through the dial option. -// -// Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { - dopts := cc.dopts +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). 
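The hunk below moves determineAuthority from a method to a free function, but the precedence it encodes is unchanged in both versions: an explicit WithAuthority wins over the credentials' ServerName, which wins over anything derived from the dial target. A usage sketch with placeholder host names:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The :authority pseudo-header for RPCs on this channel will be
	// "api.example.com", overriding the endpoint from the dial target.
	conn, err := grpc.Dial("dns:///10.0.0.7:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithAuthority("api.example.com"), // placeholder authority
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}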
@@ -1851,58 +1623,25 @@ func (cc *ClientConn) determineAuthority() error { } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } - endpoint := cc.parsedTarget.Endpoint() - target := cc.target switch { case authorityFromDialOption != "": - cc.authority = authorityFromDialOption + return authorityFromDialOption, nil case authorityFromCreds != "": - cc.authority = authorityFromCreds + return authorityFromCreds, nil case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - cc.authority = "localhost" + return "localhost", nil case strings.HasPrefix(endpoint, ":"): - cc.authority = "localhost" + endpoint + return "localhost" + endpoint, nil default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + return endpoint, nil } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - return nil -} - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. 
- cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil } diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 934fac2b..0b206a57 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,15 +18,7 @@ package codes -import ( - "strconv" - - "google.golang.org/grpc/internal" -) - -func init() { - internal.CanonicalString = canonicalString -} +import "strconv" func (c Code) String() string { switch c { @@ -68,44 +60,3 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } - -func canonicalString(c Code) string { - switch c { - case OK: - return "OK" - case Canceled: - return "CANCELLED" - case Unknown: - return "UNKNOWN" - case InvalidArgument: - return "INVALID_ARGUMENT" - case DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case NotFound: - return "NOT_FOUND" - case AlreadyExists: - return "ALREADY_EXISTS" - case PermissionDenied: - return "PERMISSION_DENIED" - case ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case FailedPrecondition: - return "FAILED_PRECONDITION" - case Aborted: - return "ABORTED" - case OutOfRange: - return "OUT_OF_RANGE" - case Unimplemented: - return "UNIMPLEMENTED" - case Internal: - return "INTERNAL" - case Unavailable: - return "UNAVAILABLE" - case DataLoss: - return "DATA_LOSS" - case Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" - } -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d510..4866da10 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -38,14 +38,13 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - globalDialOptions = append(globalDialOptions, opt...) + extraDialOptions = append(extraDialOptions, opt...) } internal.ClearGlobalDialOptions = func() { - globalDialOptions = nil + extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption - internal.DisableGlobalDialOptions = newDisableGlobalDialOptions } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -77,7 +76,6 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder - idleTimeout time.Duration } // DialOption configures how we set up the connection. @@ -85,7 +83,7 @@ type DialOption interface { apply(*dialOptions) } -var globalDialOptions []DialOption +var extraDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. @@ -98,16 +96,6 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} -type disableGlobalDialOptions struct{} - -func (disableGlobalDialOptions) apply(*dialOptions) {} - -// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn -// from applying the global DialOptions (set via AddGlobalDialOptions). -func newDisableGlobalDialOptions() DialOption { - return &disableGlobalDialOptions{} -} - // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. 
type funcDialOption struct { @@ -296,9 +284,6 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. -// -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -310,9 +295,6 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -455,9 +437,6 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Use of this feature is not recommended. For more information, please see: -// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md -// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -656,23 +635,3 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } - -// WithIdleTimeout returns a DialOption that configures an idle timeout for the -// channel. If the channel is idle for the configured timeout, i.e there are no -// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode -// and as a result the name resolver and load balancer will be shut down. The -// channel will exit idle mode when the Connect() method is called or when an -// RPC is initiated. -// -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func WithIdleTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.idleTimeout = d - }) -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f7..8e29a62f 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 +// protoc-gen-go v1.28.1 +// protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d..a332dfd7 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -35,11 +35,6 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
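Stepping back to the WithIdleTimeout deletion earlier in this hunk: that option was the only public knob over the idleness machinery removed by this revert. On a grpc-go version that ships it (it arrived alongside the idleness support and was marked experimental), usage looks like this — a sketch with a placeholder target:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// After five minutes without RPCs the channel enters idle mode and
	// shuts down its resolver and LB policy; passing 0 disables this.
	conn, err := grpc.Dial("dns:///example.internal:50051", // placeholder
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}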
const _ = grpc.SupportPackageIsVersion7 -const ( - Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" - Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" -) - // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -75,7 +70,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) if err != nil { return nil, err } @@ -83,7 +78,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) if err != nil { return nil, err } @@ -171,7 +166,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Health_Check_FullMethodName, + FullMethod: "/grpc.health.v1.Health/Check", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go deleted file mode 100644 index dc3dc72f..00000000 --- a/vendor/google.golang.org/grpc/idle.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error -} - -// idlenessManager defines the functionality required to track RPC activity on a -// channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() -} - -type noopIdlenessManager struct{} - -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} - -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. 
-type idlenessManagerImpl struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. - idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// newIdlenessManager creates a new idleness manager implementation for the -// given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} - } - - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), - } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i -} - -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if i.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - i.timer.Reset(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { - return - } - - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) - return - } - - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the - // channel is either in idle mode or is trying to get there. 
- if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both cases, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. - if i.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last-minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. - return false - } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // A very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - return false - } - - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. - i.actuallyIdle = true - return true -} - -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { - return nil - } - - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil -} - -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if !i.actuallyIdle { - // This can happen in two scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative.
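// Editor's note: an illustrative sketch, not part of the patch. The core of
// the idleness manager above is a single atomic call counter in which the
// sentinel -math.MaxInt32 means "idle, or moving to idle". A stripped-down
// stand-in (idleGate and its methods are hypothetical names) showing why the
// CAS and the compensating AddInt32 pair up:
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type idleGate struct{ calls int32 }

// begin reports whether the caller may proceed immediately; false means the
// gate is idle (counter is negative) and must be brought out of idle first.
func (g *idleGate) begin() bool {
	return atomic.AddInt32(&g.calls, 1) > 0
}

// tryIdle flips the gate to idle only if no calls are in flight, mirroring
// the CompareAndSwapInt32(0, -math.MaxInt32) above.
func (g *idleGate) tryIdle() bool {
	return atomic.CompareAndSwapInt32(&g.calls, 0, -math.MaxInt32)
}

// exitIdle removes the sentinel while preserving increments made by callers
// that raced with idle entry, mirroring AddInt32(math.MaxInt32) above.
func (g *idleGate) exitIdle() {
	atomic.AddInt32(&g.calls, math.MaxInt32)
}

func main() {
	var g idleGate
	fmt.Println(g.tryIdle()) // true: 0 -> -math.MaxInt32
	fmt.Println(g.begin())   // false: counter negative, caller must exit idle
	g.exitIdle()             // counter becomes 1: the racing call is counted
	fmt.Println(g.begin())   // true: normal fast path again
}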
- // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get - // here. The first one to get the lock would get the channel to exit idle. - // - // Either way, nothing to do here. - return nil - } - - if err := i.enforcer.exitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) - return nil -} - -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode. - atomic.AddInt32(&i.activeCallsCount, -1) -} - -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 -} - -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) - - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 755fdebc..809d73cc 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,13 +28,8 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -var grpclogLogger = grpclog.Component("binarylog") - -// Logger specifies MethodLoggers for method names with a Log call that -// takes a context. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. +// Logger is the global binary logger. It can be used to get a binary logger for +// each method. type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -45,6 +40,8 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binary logger. // // Only call this at init time. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f6322..d71e4417 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,7 +19,6 @@ package binarylog import ( - "context" "net" "strings" "sync/atomic" @@ -49,11 +48,8 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed.
type MethodLogger interface { - Log(context.Context, LogEntryConfig) + Log(LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -68,9 +64,6 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -105,7 +98,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } @@ -151,9 +144,6 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. -// -// This is used in the 1.0 release of gcp/observability, and thus must not be -// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd..9f6a0c12 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,7 +35,6 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} - closed bool mu sync.Mutex backlog []interface{} } @@ -48,18 +47,16 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() - defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) == 0 { select { case b.c <- t: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) + b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -67,10 +64,6 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() - defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -79,6 +72,7 @@ func (b *Unbounded) Load() { default: } } + b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -86,20 +80,6 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -// -// If the unbounded buffer is closed, the read channel returned by this method -// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } - -// Close closes the unbounded buffer. -func (b *Unbounded) Close() { - b.mu.Lock() - defer b.mu.Unlock() - if b.closed { - return - } - b.closed = true - close(b.c) -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d..5ba9d94d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,10 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). 
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index dd314cfb..821dd0a7 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,15 +28,9 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1..04136882 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -61,10 +61,11 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -78,18 +79,14 @@ var ( // XDSFederation indicates whether federation support is enabled, which can // be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by + // support for the RLS CLuster Specifier is enabled, which can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) + // "true". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
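// Editor's note: an illustrative sketch, not part of the patch. Every flag in
// the envconfig blocks above is read through helpers like boolFromEnv, whose
// body is not shown in these hunks; the following is a plausible
// reconstruction (an assumption, not the vendored source) of how such a
// helper behaves:
package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv returns def unless the variable explicitly opts out of it:
// with def=true only "false" disables the feature, and with def=false only
// "true" enables it.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("GRPC_EXPERIMENTAL_XDS_FEDERATION", "true")
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false))      // true: explicit opt-in
	fmt.Println(boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)) // true: unset keeps the default
}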
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42..82af70e9 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -63,9 +63,6 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -76,15 +73,6 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) - -} - -// V reports whether verbosity level l is at least the requested verbose level. -func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e90..517ea706 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,17 +72,3 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go deleted file mode 100644 index 37b8d411..00000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "context" - "sync" - - "google.golang.org/grpc/internal/buffer" -) - -// CallbackSerializer provides a mechanism to schedule callbacks in a -// synchronized manner. 
It provides a FIFO guarantee on the order of execution -// of scheduled callbacks. New callbacks can be scheduled by invoking the -// Schedule() method. -// -// This type is safe for concurrent access. -type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e. all - // scheduled callbacks are executed and the serializer has deallocated all - // its resources. - Done chan struct{} - - callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool -} - -// NewCallbackSerializer returns a new CallbackSerializer instance. The provided -// context will be passed to the scheduled callbacks. Users should cancel the -// provided context to shut down the CallbackSerializer. It is guaranteed that no -// callbacks will be added once this context is canceled, and any pending un-run -// callbacks will be executed before the serializer is shut down. -func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), - callbacks: buffer.NewUnbounded(), - } - go t.run(ctx) - return t -} - -// Schedule adds a callback to be scheduled after existing callbacks are run. -// -// Callbacks are expected to honor the context when performing any blocking -// operations, and should return early when the context is canceled. -// -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is canceled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() - - if t.closed { - return false - } - t.callbacks.Put(f) - return true -} - -func (t *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - - defer close(t.Done) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): - if !ok { - return - } - t.callbacks.Load() - callback.(func(ctx context.Context))(ctx) - } - } - - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} - -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-t.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() - default: - return backlog - } - } -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c8..0a76d9de 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -58,12 +58,6 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials - // CanonicalString returns the canonical string of the code defined here: - // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed.
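// Editor's note: an illustrative sketch, not part of the patch. The deleted
// CallbackSerializer above reduces to "one goroutine drains a FIFO queue of
// callbacks". A minimal stand-in (hypothetical names; a plain buffered
// channel replaces buffer.Unbounded, and pending callbacks are not drained
// after cancellation the way the real type guarantees):
package main

import (
	"context"
	"fmt"
)

type serializer struct {
	cbs  chan func(context.Context)
	done chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{cbs: make(chan func(context.Context), 16), done: make(chan struct{})}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.cbs:
				cb(ctx) // callbacks run strictly one at a time, in FIFO order
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)
	for i := 1; i <= 3; i++ {
		i := i
		s.cbs <- func(context.Context) { fmt.Println("callback", i) }
	}
	s.cbs <- func(context.Context) { cancel() } // last callback stops the loop
	<-s.done
}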
- CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular @@ -72,35 +66,16 @@ var ( // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. AddGlobalDialOptions interface{} // func(opt ...DialOption) - // DisableGlobalDialOptions returns a DialOption that prevents the - // ClientConn from applying the global DialOptions (set via - // AddGlobalDialOptions). - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. @@ -111,15 +86,9 @@ var ( // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. - // - // This is used in the 1.0 release of gcp/observability, and thus must not be - // deleted or changed. BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -161,9 +130,6 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() - - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e..b2980f8a 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -76,11 +76,33 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { return addr } -// Validate validates every pair in md with ValidatePair. 
+// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. func Validate(md metadata.MD) error { for k, vals := range md { - if err := ValidatePair(k, vals...); err != nil { - return err + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key; index the bytes directly to save the rune conversion a for range loop would do + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } } } return nil @@ -96,37 +118,3 @@ func hasNotPrintable(msg string) bool { } return false } - -// ValidatePair validates a key-value pair with the following rules (the pseudo-header will be skipped): -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { - // key should not be empty - if key == "" { - return fmt.Errorf("there is an empty key in the header") - } - // pseudo-header will be ignored - if key[0] == ':' { - return nil - } - // check key; index the bytes directly to save the rune conversion a for range loop would do - for i := 0; i < len(key); i++ { - r := key[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { - return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) - } - } - if strings.HasSuffix(key, "-bin") { - return nil - } - // check value - for _, val := range vals { - if hasNotPrintable(val) { - return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) - } - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go deleted file mode 100644 index 11d82afc..00000000 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
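// Editor's note: an illustrative sketch, not part of the patch. The restored
// Validate above accepts only lowercase keys drawn from [0-9a-z-_.], skips
// value checks for "-bin" keys, and requires printable ASCII otherwise. The
// key check in isolation (validKey is a hypothetical name):
package main

import "fmt"

// validKey mirrors the restored byte-wise loop: every byte of the key must
// fall in [0-9a-z-_.]; indexing bytes is safe because legal keys are ASCII.
func validKey(k string) bool {
	for i := 0; i < len(k); i++ {
		r := k[i]
		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validKey("grpc-timeout")) // true
	fmt.Println(validKey("Grpc-Timeout")) // false: uppercase is illegal on the wire
	fmt.Println(validKey("trace id"))     // false: space is not in the allowed set
}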
- * - */ - -package serviceconfig - -import ( - "encoding/json" - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// Duration defines JSON marshal and unmarshal methods to conform to the -// protobuf JSON spec defined [here]. -// -// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration -type Duration time.Duration - -func (d Duration) String() string { - return fmt.Sprint(time.Duration(d)) -} - -// MarshalJSON converts from d to a JSON string output. -func (d Duration) MarshalJSON() ([]byte, error) { - ns := time.Duration(d).Nanoseconds() - sec := ns / int64(time.Second) - ns = ns % int64(time.Second) - - var sign string - if sec < 0 || ns < 0 { - sign, sec, ns = "-", -1*sec, -1*ns - } - - // Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision. - str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, "000") - str = strings.TrimSuffix(str, ".000") - return []byte(fmt.Sprintf("\"%ss\"", str)), nil -} - -// UnmarshalJSON unmarshals b as a duration JSON string into d. -func (d *Duration) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !strings.HasSuffix(s, "s") { - return fmt.Errorf("malformed duration %q: missing seconds unit", s) - } - neg := false - if s[0] == '-' { - neg = true - s = s[1:] - } - ss := strings.SplitN(s[:len(s)-1], ".", 3) - if len(ss) > 2 { - return fmt.Errorf("malformed duration %q: too many decimals", s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var sec, ns int64 - if len(ss[0]) > 0 { - var err error - if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - // Maximum seconds value per the durationpb spec. - const maxProtoSeconds = 315_576_000_000 - if sec > maxProtoSeconds { - return fmt.Errorf("out of range: %q", s) - } - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return fmt.Errorf("malformed duration %q: too many digits after decimal", s) - } - var err error - if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { - return fmt.Errorf("malformed duration %q: %v", s, err) - } - for i := 9; i > len(ss[1]); i-- { - ns *= 10 - } - hasDigits = true - } - if !hasDigits { - return fmt.Errorf("malformed duration %q: contains no numbers", s) - } - - if neg { - sec *= -1 - ns *= -1 - } - - // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. 
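// Editor's note: an illustrative sketch, not part of the patch. The Duration
// type being deleted above emits protobuf-JSON durations: decimal seconds
// with 0, 3, 6, or 9 fractional digits and a trailing "s". This standalone
// helper (marshalProtoDuration is a hypothetical name) reproduces the
// trimming logic from MarshalJSON:
package main

import (
	"fmt"
	"strings"
	"time"
)

func marshalProtoDuration(d time.Duration) string {
	ns := d.Nanoseconds()
	sec, frac := ns/int64(time.Second), ns%int64(time.Second)
	var sign string
	if sec < 0 || frac < 0 {
		sign, sec, frac = "-", -sec, -frac
	}
	// Print nine fractional digits, then strip trailing zeros in groups of
	// three so the output keeps 0, 3, 6, or 9 digits of precision.
	s := fmt.Sprintf("%s%d.%09d", sign, sec, frac)
	s = strings.TrimSuffix(s, "000")
	s = strings.TrimSuffix(s, "000")
	s = strings.TrimSuffix(s, ".000")
	return s + "s"
}

func main() {
	fmt.Println(marshalProtoDuration(1500 * time.Millisecond)) // 1.500s
	fmt.Println(marshalProtoDuration(2 * time.Second))         // 2s
	fmt.Println(marshalProtoDuration(-time.Microsecond))       // -0.000001s
}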
- const maxSeconds = math.MaxInt64 / int64(time.Second) - const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) - const minSeconds = math.MinInt64 / int64(time.Second) - const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) - - if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { - *d = Duration(math.MaxInt64) - } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { - *d = Duration(math.MinInt64) - } else { - *d = Duration(sec*int64(time.Second) + ns) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81..9097385e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -22,7 +22,6 @@ import ( "bytes" "errors" "fmt" - "net" "runtime" "strconv" "sync" @@ -30,7 +29,6 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -488,14 +486,12 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool - conn net.Conn - logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -508,8 +504,6 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, - conn: conn, - logger: logger, } return l } @@ -527,27 +521,15 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the -// activeStreams linked-list. This results in writing of HTTP2 frames into an -// underlying write buffer. When there's no more control frames to read from -// controlBuf, loopy flushes the write buffer. As an optimization, to increase -// the batch size for each flush, loopy yields the processor, once if the batch -// size is too low to give stream goroutines a chance to fill it up. -// -// Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if l.logger.V(logLevel) { - l.logger.Infof("loopyWriter exiting with error: %v", err) - } - if !isIOError(err) { - l.framer.writer.Flush() - l.conn.Close() - } - l.cbuf.finish() - }() + // Always flush the writer before exiting in case there are pending frames + // to be sent. 
+ defer l.framer.writer.Flush() for { it, err := l.cbuf.get(true) if err != nil { @@ -599,11 +581,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return + return nil } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -611,9 +593,10 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return + return nil } } + return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -621,11 +604,13 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - l.applySettings(s.ss) + if err := l.applySettings(s.ss); err != nil { + return err + } return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) { +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { str := &outStream{ id: h.streamID, state: empty, @@ -633,14 +618,15 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) { wq: h.wq, } l.estdStreams[h.streamID] = str + return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if l.logger.V(logLevel) { - l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) } return nil } @@ -695,8 +681,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if l.logger.V(logLevel) { - l.logger.Warningf("Encountered error while encoding headers: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) } } } @@ -734,10 +720,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) { +func (l *loopyWriter) preprocessData(df *dataFrame) error { str, ok := l.estdStreams[df.streamID] if !ok { - return + return nil } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -746,6 +732,7 @@ func (l *loopyWriter) preprocessData(df *dataFrame) { str.state = active l.activeStreams.enqueue(str) } + return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -756,8 +743,9 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { o.resp <- l.sendQuota + return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -775,7 +763,6 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.draining && len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. 
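// Editor's note: an illustrative sketch, not part of the patch. loopyWriter's
// run loop above batches frames into a buffered writer and only flushes once
// the control buffer is drained. The same pattern in miniature (writerLoop
// and the string "frames" are hypothetical stand-ins for control-buffer
// items):
package main

import (
	"bufio"
	"fmt"
	"os"
)

func writerLoop(work chan string, done chan struct{}) {
	w := bufio.NewWriter(os.Stdout)
	defer close(done)
	for {
		select {
		case frame, ok := <-work:
			if !ok {
				w.Flush() // flush pending frames before exiting, as run() does
				return
			}
			fmt.Fprintln(w, frame) // buffered write: no syscall per frame
		default:
			w.Flush()           // queue drained: emit the whole batch at once
			frame, ok := <-work // block until more work arrives or shutdown
			if !ok {
				return
			}
			fmt.Fprintln(w, frame)
		}
	}
}

func main() {
	work := make(chan string, 8)
	done := make(chan struct{})
	go writerLoop(work, done)
	for i := 0; i < 3; i++ {
		work <- fmt.Sprintf("frame %d", i)
	}
	close(work)
	<-done
}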
return errors.New("finished processing active streams while in draining mode") } return nil @@ -811,7 +798,6 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - // Flush and close the connection; we are done with it. return errors.New("received GOAWAY with no active streams") } } @@ -830,10 +816,17 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - l.incomingWindowUpdateHandler(i) + return l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -843,7 +836,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - l.registerStreamHandler(i) + return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -851,24 +844,21 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - l.preprocessData(i) + return l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - l.outFlowControlSizeRequestHandler(i) + return l.outFlowControlSizeRequestHandler(i) case closeConnection: - // Just return a non-I/O error and run() will flush and close the - // connection. - return ErrConnClosing + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } - return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) { +func (l *loopyWriter) applySettings(ss []http2.Setting) error { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -887,6 +877,7 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) { updateHeaderTblSize(l.hEnc, s.Val) } } + return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -920,7 +911,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, err + return false, nil } } else { l.activeStreams.enqueue(str) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3f..e6626bf9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,7 +39,6 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -84,7 +83,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype: contentSubtype, stats: stats, } - st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) @@ -152,14 +150,13 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across 
handler_server and http2_server contentSubtype string - stats []stats.Handler - logger *grpclog.PrefixLogger + stats []stats.Handler } func (ht *serverHandlerTransport) Close(err error) { ht.closeOnce.Do(func() { - if ht.logger.V(logLevel) { - ht.logger.Infof("Closing: %v", err) + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) } close(ht.closedCh) }) @@ -453,7 +450,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain() { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf084..79ee8aea 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,7 +38,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" @@ -146,7 +145,6 @@ type http2Client struct { bufferPool *bufferPool connectionID uint64 - logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -246,7 +244,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. if logger.V(logLevel) { - logger.Infof("Aborting due to connect deadline expiring: %v", err) + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) } conn.Close() } @@ -348,7 +346,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts bufferPool: newBufferPool(), onClose: onClose, } - t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -447,8 +444,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) + } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -785,7 +789,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + if t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. 
} @@ -862,8 +866,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } } if transportDrainRequired { - if t.logger.V(logLevel) { - t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + if logger.V(logLevel) { + logger.Infof("transport: t.nextID > MaxStreamID. Draining") } t.GracefulClose() } @@ -955,8 +959,8 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. @@ -1012,8 +1016,8 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("GracefulClose called") + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") } t.onClose(GoAwayInvalid) t.state = draining @@ -1177,8 +1181,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if t.logger.V(logLevel) { - t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) } statusCode = codes.Unknown } @@ -1260,12 +1264,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { - // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug - // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is - // enabled by default and double the configured KEEPALIVE_TIME used for new connections - // on that channel. - logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1337,7 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutex to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index ec4eef21..bc3da706 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,9 +35,7 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" @@ -131,8 +129,6 @@ type http2Server struct { // This lock may not be taken if mu is already held.
maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen - - logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -171,10 +167,15 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - if config.MaxStreams != math.MaxUint32 { + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: config.MaxStreams, + Val: maxStreams, }) } dynamicWindow := true @@ -253,7 +254,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: config.MaxStreams, + maxStreams: maxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -266,7 +267,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } - t.logger = prefixLoggerForServerTransport(t) // Add peer information to the http2server context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -331,9 +331,14 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) + } + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() @@ -378,7 +383,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // if false, content-type was missing or invalid isGRPC = false contentType = "" - mdata = make(metadata.MD, len(frame.Fields)) + mdata = make(map[string][]string) httpMethod string // these are set if an error is encountered while parsing the headers protocolError bool @@ -399,17 +404,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true - - case "grpc-accept-encoding": - mdata[hf.Name] = append(mdata[hf.Name], hf.Value) - if hf.Value == "" { - continue - } - compressors := hf.Value - if s.clientAdvertisedCompressors != "" { - compressors = s.clientAdvertisedCompressors + "," + compressors - } - s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -425,8 +419,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // "Transports must consider requests containing the Connection header // as malformed." 
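// Editor's note: an illustrative sketch, not part of the patch. The restored
// NewServerTransport code above cannot advertise "unlimited" concurrent
// streams as 0, because HTTP/2 gives SETTINGS_MAX_CONCURRENT_STREAMS = 0 the
// meaning "no new streams". Hence the local sentinel substitution
// (effectiveMaxStreams is a hypothetical name):
package main

import (
	"fmt"
	"math"
)

// effectiveMaxStreams maps the config value 0 ("unset") to MaxUint32, so the
// limit is effectively disabled without ever sending a 0 on the wire.
func effectiveMaxStreams(configured uint32) uint32 {
	if configured == 0 {
		return math.MaxUint32
	}
	return configured
}

func main() {
	fmt.Println(effectiveMaxStreams(0))   // 4294967295
	fmt.Println(effectiveMaxStreams(100)) // 100
}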
- A41 case "connection": - if t.logger.V(logLevel) { - t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } protocolError = true default: @@ -436,7 +430,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -450,8 +444,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: http.StatusBadRequest, @@ -545,9 +539,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early: %v", errMsg) + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + if logger.V(logLevel) { + logger.Infof("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -563,8 +557,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if t.logger.V(logLevel) { - t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -601,7 +595,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: mdata.Copy(), + Header: metadata.MD(mdata).Copy(), } sh.HandleRPC(s.ctx, inHeader) } @@ -638,8 +632,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if t.logger.V(logLevel) { - t.logger.Warningf("Encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -682,8 +676,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - if t.logger.V(logLevel) { - t.logger.Infof("Received unsupported frame type %T", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -942,8 +936,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if t.logger.V(logLevel) { - t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -1056,7 +1050,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -1161,18 +1155,18 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain("max_idle") + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain("max_age") + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. - if t.logger.V(logLevel) { - t.logger.Infof("Closing server transport due to maximum connection age") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") } t.controlBuf.put(closeConnection{}) case <-t.done: @@ -1223,8 +1217,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() return } - if t.logger.V(logLevel) { - t.logger.Infof("Closing: %v", err) + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1232,8 +1226,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { - t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. @@ -1313,14 +1307,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain(debugData string) { +func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1350,6 +1344,9 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } if retErr != nil { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. 
+ t.framer.writer.Flush() return false, retErr } return true, nil @@ -1362,7 +1359,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f..2c601a86 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -21,7 +21,6 @@ package transport import ( "bufio" "encoding/base64" - "errors" "fmt" "io" "math" @@ -38,6 +37,7 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -85,6 +85,7 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -329,8 +330,7 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) - return n, toIOError(err) + return w.conn.Write(b) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -352,30 +352,10 @@ func (w *bufWriter) Flush() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) - w.err = toIOError(w.err) w.offset = 0 return w.err } -type ioError struct { - error -} - -func (i ioError) Unwrap() error { - return i.error -} - -func isIOError(err error) bool { - return errors.As(err, &ioError{}) -} - -func toIOError(err error) error { - if err == nil { - return nil - } - return ioError{error: err} -} - type framer struct { writer *bufWriter fr *http2.Framer diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go deleted file mode 100644 index 42ed2b07..00000000 --- a/vendor/google.golang.org/grpc/internal/transport/logging.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -var logger = grpclog.Component("transport") - -func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) -} - -func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) -} - -func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c8965..0ac77ea4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -257,9 +257,6 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -348,24 +345,8 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str } // Done returns a channel which is closed when it receives the final status @@ -726,7 +707,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain(debugData string) + Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf1..fb4a88f5 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -91,11 +91,7 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - out := make(MD, len(md)) - for k, v := range md { - out[k] = copyOf(v) - } - return out + return Join(md) } // Get obtains the values for a given key. 
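The metadata.Copy change above is behavior-preserving: Join builds a fresh MD and appends values into fresh slices, so mutating the copy still leaves the original alone. A quick check, runnable against either side of this revert:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("k", "v1")
	cp := md.Copy() // delegates to metadata.Join(md) on the 1.53 side
	cp.Append("k", "v2")
	// The original is unaffected because Join allocated new value slices.
	fmt.Println(md["k"], cp["k"]) // [v1] [v1 v2]
}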
@@ -175,11 +171,8 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
 	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
 	added := make([][]string, len(md.added)+1)
 	copy(added, md.added)
-	kvCopy := make([]string, 0, len(kv))
-	for i := 0; i < len(kv); i += 2 {
-		kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
-	}
-	added[len(added)-1] = kvCopy
+	added[len(added)-1] = make([]string, len(kv))
+	copy(added[len(added)-1], kv)
 	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 02f97595..c525dc07 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -36,7 +36,6 @@ import (
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
-	idle       bool
 	blockingCh chan struct{}
 	picker     balancer.Picker
 }
@@ -48,11 +47,7 @@ func newPickerWrapper() *pickerWrapper {
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Lock()
-	if pw.done || pw.idle {
-		// There is a small window where a picker update from the LB policy can
-		// race with the channel going to idle mode. If the picker is idle here,
-		// it is because the channel asked it to do so, and therefore it is safe
-		// to ignore the update from the LB policy.
+	if pw.done {
 		pw.mu.Unlock()
 		return
 	}
@@ -68,8 +63,10 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 // - wraps the done function in the passed in result to increment the calls
 //   failed or calls succeeded channelz counter before invoking the actual
 //   done function.
-func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
-	ac := acbw.ac
+func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
+	acw.mu.Lock()
+	ac := acw.ac
+	acw.mu.Unlock()
 	ac.incrCallsStarted()
 	done := result.Done
 	result.Done = func(b balancer.DoneInfo) {
@@ -155,14 +152,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
 		}
-		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
+		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
 			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
 			continue
 		}
-		if t := acbw.ac.getReadyTransport(); t != nil {
+		if t := acw.getAddrConn().getReadyTransport(); t != nil {
 			if channelz.IsOn() {
-				doneChannelzWrapper(acbw, &pickResult)
+				doneChannelzWrapper(acw, &pickResult)
 				return t, pickResult, nil
 			}
 			return t, pickResult, nil
@@ -190,25 +187,6 @@ func (pw *pickerWrapper) close() {
 	close(pw.blockingCh)
 }
-func (pw *pickerWrapper) enterIdleMode() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.done {
-		return
-	}
-	pw.idle = true
-}
-
-func (pw *pickerWrapper) exitIdleMode() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.done {
-		return
-	}
-	pw.blockingCh = make(chan struct{})
-	pw.idle = false
-}
-
 // dropError is a wrapper error that indicates the LB policy wishes to drop the
 // RPC and not retry it.
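The AppendToOutgoingContext hunk earlier in this span moves key lowercasing: 1.56 normalizes keys eagerly when they are appended, while the restored 1.53 stores the pairs verbatim and normalizes later when the outgoing metadata is materialized. Either way the observable result should be the same; a small sketch with an illustrative header name:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "X-Trace-Id", "abc123")
	md, _ := metadata.FromOutgoingContext(ctx)
	// Keys come back lowercased regardless of where the normalization runs.
	fmt.Println(md.Get("x-trace-id")) // [abc123]
}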
type dropError struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b0..fc91b4d2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,15 +19,11 @@ package grpc import ( - "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -47,28 +43,10 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of addresses received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { @@ -91,8 +69,7 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - addrs := state.ResolverState.Addresses - if len(addrs) == 0 { + if len(state.ResolverState.Addresses) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { @@ -105,23 +82,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg - } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) - } if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { if logger.V(2) { logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) @@ -153,6 +119,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b } return } + b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -165,21 +132,11 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. 
- return - } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() - return - } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -190,7 +147,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } - b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b6..654e9ce6 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" - "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -41,9 +41,8 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will -// be used as the scheme registered with this builder. The registry is case -// sensitive, and schemes should not contain any uppercase characters. +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -124,7 +123,7 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attributes do not affect SubConn + // for consumption by the LB policy. These attribes do not affect SubConn // creation, connection establishment, handshaking, etc. BalancerAttributes *attributes.Attributes @@ -151,17 +150,7 @@ func (a Address) Equal(o Address) bool { // String returns JSON formatted string representation of the address. func (a Address) String() string { - var sb strings.Builder - sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) - sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) - if a.Attributes != nil { - sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) - } - if a.BalancerAttributes != nil { - sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) - } - sb.WriteString("}") - return sb.String() + return pretty.ToJSON(a) } // BuildOptions includes additional information for the builder to create @@ -214,15 +203,6 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - // - // If an error is returned, the resolver should try to resolve the - // target again. The resolver should use a backoff timer to prevent - // overloading the server with requests. If a resolver is certain that - // reresolving will not change the result, e.g. because it is - // a watch-based resolver, returned errors can be ignored. - // - // If the resolved State is the same as the last reported one, calling - // UpdateState can be omitted. 
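The pick_first changes above strip the 1.56 shuffleAddressList service-config knob; the restored balancer accepts no configuration, so an empty config object is its whole surface. Selecting the policy explicitly still works on both sides. A minimal sketch, with a hypothetical dial target:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("dns:///greeter.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"pick_first":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}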
UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -300,10 +280,8 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. Scheme is defined - // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned - // string should not contain uppercase characters, as they will not match - // the parsed target's scheme as defined in RFC 3986. + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index b408b368..05a9d4e0 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( - "context" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,192 +31,129 @@ import ( "google.golang.org/grpc/serviceconfig" ) -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
-func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err + cc: cc, + done: grpcsync.NewEvent(), } - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) } - ccr.resolver.ResolveNow(o) + ccr.resolverMu.Unlock() } func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. 
-func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() } -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { return nil } - return <-errCh + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil } -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) } -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. +// NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
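UpdateState, ReportError and NewAddress above are the main entry points a resolver implementation drives on either side of this revert. A minimal static resolver sketch showing the producer side of that contract; the scheme name and addresses are hypothetical:

package staticres

import "google.golang.org/grpc/resolver"

type builder struct{ addrs []string }

func (builder) Scheme() string { return "static" }

func (b builder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	var st resolver.State
	for _, a := range b.addrs {
		st.Addresses = append(st.Addresses, resolver.Address{Addr: a})
	}
	// Lands in ccResolverWrapper.UpdateState above. An error asks the
	// resolver to re-resolve; a fixed list has nothing new to report,
	// so the error is deliberately ignored here.
	_ = cc.UpdateState(st)
	return nopResolver{}, nil
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() {
	resolver.Register(builder{addrs: []string{"10.0.0.1:50051", "10.0.0.2:50051"}})
}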
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) } -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -235,5 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a..cb7020eb 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -159,7 +159,6 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int - onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -296,41 +295,6 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} -// OnFinish returns a CallOption that configures a callback to be called when -// the call completes. The error passed to the callback is the status of the -// RPC, and may be nil. The onFinish callback provided will only be called once -// by gRPC. This is mainly used to be used by streaming interceptors, to be -// notified when the RPC completes along with information about the status of -// the RPC. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
-func OnFinish(onFinish func(err error)) CallOption { - return OnFinishCallOption{ - OnFinish: onFinish, - } -} - -// OnFinishCallOption is CallOption that indicates a callback to be called when -// the call completes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type OnFinishCallOption struct { - OnFinish func(error) -} - -func (o OnFinishCallOption) before(c *callInfo) error { - c.onFinish = append(c.onFinish, o.OnFinish) - return nil -} - -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} - // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default // 4MB. @@ -694,13 +658,12 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, } } @@ -721,7 +684,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - compressedLength int // The compressed length got from wire. + wireLength int // The compressed length got from wire. uncompressedBytes []byte } @@ -731,7 +694,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.wireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8869cc90..d5a6e78b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -74,10 +74,10 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - globalServerOptions = append(globalServerOptions, opt...) + extraServerOptions = append(extraServerOptions, opt...) } internal.ClearGlobalServerOptions = func() { - globalServerOptions = nil + extraServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption @@ -115,6 +115,12 @@ type serviceInfo struct { mdata interface{} } +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + // Server is a gRPC server to serve RPC requests. 
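The payloadInfo and outPayload hunks above rename size fields, but Length (uncompressed bytes) and WireLength (compressed bytes plus gRPC framing) survive on both sides, so a stats handler reads the same either way. A sketch of a handler that observes them; logging to the standard logger is illustrative:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type sizeLogger struct{}

func (sizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (sizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (sizeLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (sizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("recv: uncompressed=%dB wire=%dB", p.Length, p.WireLength)
	case *stats.OutPayload:
		log.Printf("sent: uncompressed=%dB wire=%dB", p.Length, p.WireLength)
	}
}

func newServer() *grpc.Server { return grpc.NewServer(grpc.StatsHandler(sizeLogger{})) }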
type Server struct { opts serverOptions @@ -139,7 +145,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -171,14 +177,13 @@ type serverOptions struct { } var defaultServerOptions = serverOptions{ - maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } -var globalServerOptions []ServerOption +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -382,9 +387,6 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { - if n == 0 { - n = math.MaxUint32 - } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -558,40 +560,47 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// data to be fed by serveStreams. This allows different requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker() { - for completed := 0; completed < serverWorkerResetThreshold; completed++ { - f, ok := <-s.serverWorkerChannel +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch if !ok { return } - f() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() } - go s.serverWorker() + go s.serverWorker(ch) } -// initServerWorkers creates worker goroutines and a channel to process incoming +// initServerWorkers creates worker goroutines and channels to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - go s.serverWorker() + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) } } func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
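The worker-pool machinery above only activates when stream workers are requested; with the default of zero, both versions fall back to one goroutine per stream. Opting in is a single server option, sketched here with an illustrative worker count:

package main

import (
	"runtime"

	"google.golang.org/grpc"
)

func newServer() *grpc.Server {
	// With N > 0, incoming streams are dispatched to N long-lived worker
	// goroutines (per-worker channels in 1.53, one shared channel in 1.56)
	// instead of spawning a fresh goroutine per stream.
	return grpc.NewServer(grpc.NumStreamWorkers(uint32(runtime.NumCPU())))
}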
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range globalServerOptions { + for _, o := range extraServerOptions { o.apply(&opts) } for _, o := range opt { @@ -888,7 +897,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain("") + st.Drain() } s.mu.Unlock() } @@ -936,26 +945,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - - streamQuota.acquire() - f := func() { - defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - } - if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- f: - return + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: default: // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() } - go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1044,7 +1053,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain("") + st.Drain() } if s.conns[addr] == nil { @@ -1243,7 +1252,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(ctx, logEntry) + binlog.Log(logEntry) } } @@ -1254,7 +1263,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor - var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1275,18 +1283,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - sendCompressorName = cp.Type() + stream.SetSendCompress(cp.Type()) } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - sendCompressorName = comp.Name() - } - } - - if sendCompressorName != "" { - if err := stream.SetSendCompress(sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + stream.SetSendCompress(rc) } } @@ -1310,12 +1312,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Length: len(d), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Data: d, + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), }) } if len(binlogs) != 0 { @@ -1323,7 +1324,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(cm) } } if trInfo != nil { @@ -1356,7 +1357,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(sh) } } st := &binarylog.ServerTrailer{ @@ -1364,7 +1365,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(st) } } return appErr @@ -1374,11 +1375,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. - if stream.SendCompress() != sendCompressorName { - comp = encoding.GetCompressor(stream.SendCompress()) - } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1406,8 +1402,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(sh) + binlog.Log(st) } } return err @@ -1421,8 +1417,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(sh) + binlog.Log(sm) } } if channelz.IsOn() { @@ -1434,16 +1430,17 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(st) } } - return t.WriteStatus(stream, statusOK) + return err } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1577,7 +1574,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(logEntry) } } @@ -1600,18 +1597,12 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - ss.sendCompressorName = s.opts.cp.Type() + stream.SetSendCompress(s.opts.cp.Type()) } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - ss.sendCompressorName = rc - } - } - - if ss.sendCompressorName != "" { - if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + stream.SetSendCompress(rc) } } @@ -1649,16 +1640,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } + t.WriteStatus(ss.s, appStatus) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(st) } } - t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1667,16 +1658,17 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } + err = t.WriteStatus(ss.s, statusOK) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(st) } } - return t.WriteStatus(ss.s, statusOK) + return err } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1854,7 +1846,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain("graceful_stop") + st.Drain() } } s.drain = true @@ -1943,60 +1935,6 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } -// SetSendCompressor sets a compressor for outbound messages from the server. -// It must not be called after any event that causes headers to be sent -// (see ServerStream.SetHeader for the complete list). Provided compressor is -// used when below conditions are met: -// -// - compressor is registered via encoding.RegisterCompressor -// - compressor name must exist in the client advertised compressor names -// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to -// get client supported compressor names. -// -// The context provided must be the context passed to the server's handler. -// It must be noted that compressor name encoding.Identity disables the -// outbound compression. -// By default, server messages will be sent using the same compressor with -// which request messages were sent. -// -// It is not safe to call SetSendCompressor concurrently with SendHeader and -// SendMsg. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return fmt.Errorf("failed to fetch the stream from the given context") - } - - if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { - return fmt.Errorf("unable to set send compressor: %w", err) - } - - return stream.SetSendCompress(name) -} - -// ClientSupportedCompressors returns compressor names advertised by the client -// via grpc-accept-encoding header. -// -// The context provided must be the context passed to the server's handler. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. 
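With SetSendCompressor and its helpers removed above, the restored server simply mirrors the request encoding, so response compression is driven entirely from the client. A sketch using the stock gzip compressor; the target address is illustrative:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Every RPC on this connection sends gzip; the 1.53 server answers in kind.
		grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}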
-func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) - if !ok || stream == nil { - return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) - } - - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil -} - // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -2031,51 +1969,3 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } - -// validateSendCompressor returns an error when given compressor name cannot be -// handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { - if name == encoding.Identity { - return nil - } - - if !grpcutil.IsCompressorNameRegistered(name) { - return fmt.Errorf("compressor not registered %q", name) - } - - for _, c := range strings.Split(clientCompressors, ",") { - if c == name { - return nil // found match - } - } - return fmt.Errorf("client does not support compressor %q", name) -} - -// atomicSemaphore implements a blocking, counting semaphore. acquire should be -// called synchronously; release may be called asynchronously. -type atomicSemaphore struct { - n int64 - wait chan struct{} -} - -func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { - // We ran out of quota. Block until a release happens. - <-q.wait - } -} - -func (q *atomicSemaphore) release() { - // N.B. the "<= 0" check below should allow for this to work with multiple - // concurrent calls to acquire, but also note that with synchronous calls to - // acquire, as our system does, n will never be less than -1. There are - // fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, 1) <= 0 { - // An acquire was waiting on us. Unblock it. - q.wait <- struct{}{} - } -} - -func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} -} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc0..f22acace 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,6 +23,8 @@ import ( "errors" "fmt" "reflect" + "strconv" + "strings" "time" "google.golang.org/grpc/codes" @@ -104,8 +106,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff internalserviceconfig.Duration - MaxBackoff internalserviceconfig.Duration + InitialBackoff string + MaxBackoff string BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -127,6 +129,50 @@ type retryThrottlingPolicy struct { TokenRatio float64 } +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + type jsonName struct { Service string Method string @@ -155,7 +201,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *internalserviceconfig.Duration + Timeout *string MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -206,10 +252,15 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: (*time.Duration)(m.Timeout), + Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -261,10 +312,18 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || + *ib <= 0 || + *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -273,8 +332,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: time.Duration(jrp.InitialBackoff), - MaxBackoff: time.Duration(jrp.MaxBackoff), + InitialBackoff: *ib, + MaxBackoff: *mb, BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b..0285dcc6 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -67,18 +67,10 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). + // Length is the length of uncompressed data. Length int - // CompressedLength is the size of the compressed payload data. Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. + // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int - // RecvTime is the time when the payload is received. 
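The parseDuration helper restored above accepts the proto-JSON duration encoding: decimal seconds with an "s" suffix and at most nine fractional digits, e.g. "1s", "0.1s", "2.000000001s". That is the format the service-config fields reverted from typed durations back to strings here expect, as in this sketch; the service name, target, and values are hypothetical:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Durations below are the string form parseDuration consumes.
const sc = `{
  "methodConfig": [{
    "name": [{"service": "pkg.Service"}],
    "timeout": "10s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}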
RecvTime time.Time } @@ -137,15 +129,9 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the size of the uncompressed payload data. Does not include any - // framing (gRPC or HTTP/2). + // Length is the length of uncompressed data. Length int - // CompressedLength is the size of the compressed payload data. Does not - // include any framing (gRPC or HTTP/2). Same as Length if compression not - // enabled. - CompressedLength int - // WireLength is the size of the compressed payload data plus gRPC framing. - // Does not include HTTP/2 framing. + // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d8..623be39f 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -77,18 +77,9 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type -// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped -// errors, the message returned contains the entire err.Error() text and not -// just the wrapped status. In that case, ok is true. +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message, and ok -// is true. -// -// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` -// returns nil (which maps to Codes.OK), or if err wraps a type -// satisfying this, a Status is returned with codes.Unknown and err's -// Error() message, and ok is false. +// - If err is nil, a Status is returned with codes.OK and no message. // // - Otherwise, err is an error not compatible with this package. In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -97,29 +88,10 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - type grpcstatus interface{ GRPCStatus() *Status } - if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { - // Error has status nil, which maps to codes.OK. There - // is no sensible behavior for this, so we turn it into - // an error with codes.Unknown and discard the existing - // status. - return New(codes.Unknown, err.Error()), false - } - return gs.GRPCStatus(), true - } - var gs grpcstatus - if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { - // Error wraps an error that has status nil, which maps - // to codes.OK. There is no sensible behavior for this, - // so we turn it into an error with codes.Unknown and - // discard the existing status. - return New(codes.Unknown, err.Error()), false - } - p := gs.GRPCStatus().Proto() - p.Message = err.Error() - return status.FromProto(p), true + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false } @@ -131,16 +103,19 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error or if it wraps a -// Status error. If that is not the case, it returns codes.OK if err is nil, or -// codes.Unknown otherwise. 
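The FromError rewrite above is the most user-visible part of this revert for status handling: the 1.56 side unwraps with errors.As, while the restored 1.53 only type-asserts, so a wrapped status error degrades to Unknown. A sketch of the difference:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "no such user")
	fmt.Println(status.Code(err)) // NotFound in both versions

	wrapped := fmt.Errorf("lookup failed: %w", err)
	// The removed 1.56 code unwraps and still reports NotFound;
	// the pinned 1.53 type-asserts only and reports Unknown.
	fmt.Println(status.Code(wrapped))
}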
+// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - - return Convert(err).Code() + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown } // FromContextError converts a context error or wrapped context error into a diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685..93231af2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -123,9 +123,6 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - // - // It is not safe to modify the message after calling SendMsg. Tracing - // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -155,11 +152,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,19 +168,10 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { - // validate md + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } - // validate added - for _, kvs := range added { - for i := 0; i < len(kvs); i += 2 { - if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - } - } } if channelz.IsOn() { cc.incrCallsStarted() @@ -369,7 +352,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) + binlog.Log(logEntry) } } @@ -477,7 +460,7 @@ func (a *csAttempt) newStream() error { // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metadata != nil { + if a.pickResult.Metatada != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. 
Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -487,7 +470,7 @@ func (a *csAttempt) newStream() error { // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metadata) + md = metadata.Join(md, a.pickResult.Metatada) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } @@ -817,7 +800,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) + binlog.Log(logEntry) } } return m, nil @@ -898,7 +881,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, cm) + binlog.Log(cm) } } return err @@ -922,7 +905,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, sm) + binlog.Log(sm) } } if err != nil || !cs.desc.ServerStreams { @@ -943,7 +926,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { logEntry.PeerAddr = peer.Addr } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) + binlog.Log(logEntry) } } } @@ -970,7 +953,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, chc) + binlog.Log(chc) } } // We never returned an error here for reasons. @@ -988,9 +971,6 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true - for _, onFinish := range cs.callInfo.onFinish { - onFinish(err) - } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -1012,7 +992,7 @@ func (cs *clientStream) finish(err error) { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + binlog.Log(c) } } if err == nil { @@ -1101,10 +1081,9 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1273,19 +1252,14 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on stream context to cleanup when the stream context is - // canceled. Also listen for the addrConn's context in case the - // addrConn is closed or reconnects to a different address. In all - // other cases, an error should already be injected into the recv - // buffer by the transport, which the client will eventually receive, - // and then we will cancel the stream's context in - // addrConnStream.finish. + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. 
go func() { - ac.mu.Lock() - acCtx := ac.ctx - ac.mu.Unlock() select { - case <-acCtx.Done(): + case <-ac.ctx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1537,8 +1511,6 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor - sendCompressorName string - maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1586,7 +1558,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) + binlog.Log(sh) } } return err @@ -1631,13 +1603,6 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() - // Server handler could have set new compressor by calling SetSendCompressor. - // In case it is set, we need to use it for compressing outbound message. - if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) - ss.sendCompressorName = sendCompressorsName - } - // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1659,14 +1624,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sh) + binlog.Log(sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, sm) + binlog.Log(sm) } } if len(ss.statsHandler) != 0 { @@ -1714,7 +1679,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, chc) + binlog.Log(chc) } } return err @@ -1730,10 +1695,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), }) } } @@ -1742,7 +1706,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(ss.ctx, cm) + binlog.Log(cm) } } return nil diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3cc75406..fe552c31 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.56.3" +const Version = "1.53.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b..3728aed0 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -41,8 +41,16 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. 
+ if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -60,7 +68,8 @@ fi # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi diff --git a/vendor/modules.txt b/vendor/modules.txt index 8df05562..e224c134 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -473,7 +473,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.7.0 +# golang.org/x/oauth2 v0.5.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -535,12 +535,12 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 +# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.56.3 +# google.golang.org/grpc v1.53.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes
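
The status.go hunks above restore the pre-errors.As behavior: FromError and Code recognize an error only if the error value itself implements `GRPCStatus() *Status`; a status error wrapped with fmt.Errorf("%w", ...) falls through to codes.Unknown. A minimal sketch of the observable difference (the package layout and message text are illustrative, not part of this patch):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.NotFound, "object missing")

	// A direct status error is recognized both before and after the revert.
	fmt.Println(status.Code(st.Err())) // NotFound

	// A wrapped status error is NOT unwrapped after this revert: Code falls
	// back to a plain type assertion instead of errors.As, and the wrapper
	// returned by fmt.Errorf has no GRPCStatus method, so it maps to
	// codes.Unknown.
	wrapped := fmt.Errorf("lookup failed: %w", st.Err())
	fmt.Println(status.Code(wrapped)) // Unknown (NotFound on grpc v1.56.x)
}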
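
The stats hunks (OutPayload above, plus the recvMsg call sites in stream.go) drop the CompressedLength field and fold the compressed size back into WireLength. A sketch of a client-side stats.Handler that relies only on the fields still present after the revert; statsLogger and the target address are illustrative names, not from this patch:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// statsLogger logs payload sizes using only Length (uncompressed) and
// WireLength (compressed plus gRPC framing), the two size fields that
// exist on In/OutPayload at grpc v1.53.0.
type statsLogger struct{}

func (statsLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (statsLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (statsLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (statsLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("recv payload: %d bytes uncompressed, %d on the wire", p.Length, p.WireLength)
	case *stats.OutPayload:
		log.Printf("sent payload: %d bytes uncompressed, %d on the wire", p.Length, p.WireLength)
	}
}

func main() {
	// Hypothetical target; shown only to illustrate wiring the handler.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(statsLogger{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}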
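
The ClientStream hunk trims the SendMsg commentary, but the concurrency contract it documents is unchanged by the revert: SendMsg and RecvMsg may run in different goroutines, while SendMsg must not be called concurrently with itself or with CloseSend. A schematic pattern consistent with that contract; pump and the placeholder message types are ours, not from this patch:

// pump sends every request while a dedicated goroutine drains responses,
// the one split the SendMsg/RecvMsg documentation permits.
func pump(stream grpc.ClientStream, reqs []proto.Message) error {
	errCh := make(chan error, 1)
	go func() {
		for {
			var resp anypb.Any // placeholder response type
			if err := stream.RecvMsg(&resp); err != nil {
				errCh <- err // io.EOF on clean stream completion
				return
			}
		}
	}()
	for _, r := range reqs {
		if err := stream.SendMsg(r); err != nil {
			return err
		}
	}
	// CloseSend only after the sending goroutine is done with SendMsg.
	if err := stream.CloseSend(); err != nil {
		return err
	}
	return <-errCh
}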