balancergroup: remove mentions of locality from comments #7476

Merged 1 commit on Aug 6, 2024
42 changes: 18 additions & 24 deletions internal/balancergroup/balancergroup.go
@@ -99,7 +99,7 @@
if sbc.balancer == nil {
sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts)
}
- sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id)
+ sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id)
sbc.balancer.SwitchTo(sbc.builder)
if sbc.ccState != nil {
sbc.balancer.UpdateClientConnState(*sbc.ccState)
@@ -121,14 +121,11 @@
sbc.ccState = &s
b := sbc.balancer
if b == nil {
- // This sub-balancer was closed. This should never happen because
- // sub-balancers are closed when the locality is removed from EDS, or
- // the balancer group is closed. There should be no further address
- // updates when either of this happened.
- //
- // This will be a common case with priority support, because a
- // sub-balancer (and the whole balancer group) could be closed because
- // it's the lower priority, but it can still get address updates.
+ // A sub-balancer is closed when it is removed from the group or the
+ // group is closed as a whole, and is not expected to receive updates
+ // after that. But when used with the priority LB policy a sub-balancer
+ // (and the whole balancer group) could be closed because it's the lower
+ // priority, but it can still get address updates.
return nil
}
return b.UpdateClientConnState(s)
@@ -137,14 +134,11 @@
func (sbc *subBalancerWrapper) resolverError(err error) {
b := sbc.balancer
if b == nil {
- // This sub-balancer was closed. This should never happen because
- // sub-balancers are closed when the locality is removed from EDS, or
- // the balancer group is closed. There should be no further address
- // updates when either of this happened.
- //
- // This will be a common case with priority support, because a
- // sub-balancer (and the whole balancer group) could be closed because
- // it's the lower priority, but it can still get address updates.
+ // A sub-balancer is closed when it is removed from the group or the
+ // group is closed as a whole, and is not expected to receive updates
+ // after that. But when used with the priority LB policy a sub-balancer
+ // (and the whole balancer group) could be closed because it's the lower
+ // priority, but it can still get address updates.
return
}
b.ResolverError(err)
@@ -302,7 +296,7 @@
//
// TODO: Get rid of the existing Add() API and replace it with this.
func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
- bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id)
+ bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id)
builder := balancer.Get(balancerName)
if builder == nil {
return fmt.Errorf("unregistered balancer name %q", balancerName)
@@ -318,7 +312,7 @@
if bg.outgoingStarted && bg.deletedBalancerCache != nil {
if old, ok := bg.deletedBalancerCache.Remove(id); ok {
if bg.logger.V(2) {
- bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id)
+ bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}

@@ -372,13 +366,13 @@
// closed after timeout. Cleanup work (closing sub-balancer and removing
// subconns) will be done after timeout.
func (bg *BalancerGroup) Remove(id string) {
- bg.logger.Infof("Removing child policy for locality %q", id)
+ bg.logger.Infof("Removing child policy for child %q", id)

bg.outgoingMu.Lock()

sbToRemove, ok := bg.idToBalancerConfig[id]
if !ok {
- bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id)
+ bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id)
bg.outgoingMu.Unlock()
return
}
@@ -394,13 +388,13 @@

if bg.deletedBalancerCache != nil {
if bg.logger.V(2) {
- bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id)
+ bg.logger.Infof("Adding child policy for child %q to the balancer cache", id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}

bg.deletedBalancerCache.Add(id, sbToRemove, func() {
if bg.logger.V(2) {
- bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id)
+ bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id)
bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len())
}

@@ -541,7 +535,7 @@
// aggregator will create an aggregated picker and an aggregated connectivity
// state, then forward to ClientConn.
func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
- bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state)
+ bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state)

// Send new state to the aggregator, without holding the incomingMu.
// incomingMu is to protect all calls to the parent ClientConn, this update
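The rewritten comments above describe why a closed child can still receive updates: when the balancer group is used by the priority LB policy, a lower-priority child (or the whole group) may be closed while address updates are still in flight, so the wrapper simply drops them when its inner balancer is nil. Below is a minimal, self-contained sketch of that guard pattern; childWrapper and updateAddresses are hypothetical names used only for illustration and are not the actual subBalancerWrapper API from this package.

```go
package main

import "fmt"

// childWrapper wraps a child LB policy. balancer is set to nil once the child
// has been closed (removed from the group, or the group itself was closed).
type childWrapper struct {
	id       string
	balancer func(addrs []string) // stand-in for the real child policy
}

// updateAddresses forwards an address update to the child. A closed child can
// still see updates (e.g. the priority LB policy closed it as a lower
// priority), so the nil check drops the update instead of panicking.
func (c *childWrapper) updateAddresses(addrs []string) {
	if c.balancer == nil {
		fmt.Printf("child %q is closed; dropping update\n", c.id)
		return
	}
	c.balancer(addrs)
}

func main() {
	c := &childWrapper{
		id:       "child-1",
		balancer: func(addrs []string) { fmt.Println("child-1 got", addrs) },
	}
	c.updateAddresses([]string{"10.0.0.1:443"}) // forwarded to the child
	c.balancer = nil                            // child closed, e.g. as a lower priority
	c.updateAddresses([]string{"10.0.0.2:443"}) // dropped by the nil guard
}
```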