xds/resolver: Add support for cluster specifier plugins #4987
Changes from 4 commits
```diff
@@ -38,14 +38,17 @@ import (
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/xds/internal/balancer/clustermanager"
 	"google.golang.org/grpc/xds/internal/balancer/ringhash"
+	"google.golang.org/grpc/xds/internal/clusterspecifier"
 	"google.golang.org/grpc/xds/internal/httpfilter"
 	"google.golang.org/grpc/xds/internal/httpfilter/router"
 	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 const (
-	cdsName               = "cds_experimental"
-	xdsClusterManagerName = "xds_cluster_manager_experimental"
+	cdsName                      = "cds_experimental"
+	xdsClusterManagerName        = "xds_cluster_manager_experimental"
+	clusterPrefix                = "cluster:"
+	clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
 )
 
 type serviceConfig struct {
```
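For illustration, a minimal sketch (using the hypothetical base name "foo", which is not in the diff) of the child names these two prefixes produce; the point is that a plain cluster and a cluster specifier plugin get distinct child names even when their base names match:

```go
package main

import "fmt"

const (
	clusterPrefix                = "cluster:"
	clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
)

func main() {
	// A plain cluster and a CSP with the same base name still map to
	// distinct child names in the generated service config.
	fmt.Println(clusterPrefix + "foo")                // cluster:foo
	fmt.Println(clusterSpecifierPluginPrefix + "foo") // cluster_specifier_plugin:foo
}
```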
```diff
@@ -83,12 +86,23 @@ func (r *xdsResolver) pruneActiveClusters() {
 // serviceConfigJSON produces a service config in JSON format representing all
 // the clusters referenced in activeClusters. This includes clusters with zero
 // references, so they must be pruned first.
-func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) {
+func serviceConfigJSON(activeClusters map[string]*clusterInfo, clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig) ([]byte, error) {
 	// Generate children (all entries in activeClusters).
 	children := make(map[string]xdsChildConfig)
 	for cluster := range activeClusters {
-		children[cluster] = xdsChildConfig{
-			ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
+		// Look into cluster specifier plugins, whose names haven't had any
+		// prefix attached, to determine the LB config if the cluster is a
+		// CSP.
+		cspCfg, ok := clusterSpecifierPlugins[strings.TrimPrefix(cluster, clusterSpecifierPluginPrefix)]
+		if ok {
+			children[cluster] = xdsChildConfig{
+				ChildPolicy: balancerConfig(cspCfg),
+			}
+		} else {
+			// The cluster name now carries the "cluster:" prefix; the CDS
+			// policy will have to trim it off when it queries CDS.
+			children[cluster] = xdsChildConfig{
+				ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
+			}
 		}
 	}
```

Reviewer: So... if a CSP is called "cluster:foo" and a cluster named "foo" exists, then "cluster:foo" will be found in the CSP map, even though that isn't what was intended. Should we be looking at whether the …

Author: Simply persisted the config in clusterInfo, which solves this correctness issue.

Reviewer: Are we handling this case correctly (I think we're not): …

Author: Ah, I found a solution that solves this problem and the comment about name collisions on the branch in serviceConfigJSON. I simply added the CSP balancer configuration (if the cluster is a CSP) to the cluster info. This reuses all the plumbing around active clusters, and also solves the correctness issue on the branch of the cluster specifier, as you can branch on clusterInfo.cspCfg != nil.
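The collision the reviewer describes is easy to reproduce in isolation. A minimal sketch, using hypothetical names not taken from the diff: strings.TrimPrefix returns its input unchanged when the prefix is absent, so a prefixed plain-cluster name like "cluster:foo" survives the trim and can match a CSP that happens to be named "cluster:foo":

```go
package main

import (
	"fmt"
	"strings"
)

const (
	clusterPrefix                = "cluster:"
	clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
)

func main() {
	// A CSP whose name happens to be "cluster:foo" (hypothetical).
	csps := map[string]string{"cluster:foo": "csp-balancer-config"}

	// Child name generated for a plain cluster named "foo".
	child := clusterPrefix + "foo" // "cluster:foo"

	// TrimPrefix is a no-op here because child lacks the CSP prefix, so the
	// lookup wrongly matches the CSP named "cluster:foo".
	if _, ok := csps[strings.TrimPrefix(child, clusterSpecifierPluginPrefix)]; ok {
		fmt.Println("plain cluster misclassified as a CSP")
	}
}
```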
```diff
@@ -134,11 +148,15 @@ func (r route) String() string {
 }
 
 type configSelector struct {
-	r                *xdsResolver
-	virtualHost      virtualHost
-	routes           []route
-	clusters         map[string]*clusterInfo
-	httpFilterConfig []xdsresource.HTTPFilter
+	r                       *xdsResolver
+	virtualHost             virtualHost
+	routes                  []route
+	clusters                map[string]*clusterInfo
+	httpFilterConfig        []xdsresource.HTTPFilter
+	clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig
+	// Will be used for:
+	// a. serviceConfigJSON (this will build out the service config, but we
+	//    still need to keep ref counts in active clusters); this will be
+	//    used to get the LB configurations.
 }
 
 var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")
```
```diff
@@ -158,10 +176,12 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
 	if rt == nil || rt.clusters == nil {
 		return nil, errNoMatchedRouteFound
 	}
 
 	cluster, ok := rt.clusters.Next().(*routeCluster)
 	if !ok {
 		return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster)
 	}
 
 	// Add a ref to the selected cluster, as this RPC needs this cluster until
 	// it is committed.
 	ref := &cs.clusters[cluster.name].refCount
```
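A minimal sketch of the ref-counting pattern this comment refers to, independent of the resolver's plumbing. The refCount field and the incRefs name come from the diff; everything else here is illustrative. The count is accessed atomically because RPC selection and the committed-RPC callback may run on different goroutines:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// clusterInfo mirrors the diff's struct: refCount tracks how many in-flight
// RPCs are pinned to this cluster; accessed atomically.
type clusterInfo struct {
	refCount int32
}

func main() {
	ci := &clusterInfo{}
	atomic.AddInt32(&ci.refCount, 1)            // RPC selected this cluster
	fmt.Println(atomic.LoadInt32(&ci.refCount)) // 1 while the RPC is in flight
	atomic.AddInt32(&ci.refCount, -1)           // RPC committed; release the ref
}
```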
```diff
@@ -346,28 +366,29 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) {
 			httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride,
 			retryConfig:              su.virtualHost.RetryConfig,
 		},
-		routes:           make([]route, len(su.virtualHost.Routes)),
-		clusters:         make(map[string]*clusterInfo),
-		httpFilterConfig: su.ldsConfig.httpFilterConfig,
+		routes:                  make([]route, len(su.virtualHost.Routes)),
+		clusters:                make(map[string]*clusterInfo),
+		clusterSpecifierPlugins: su.clusterSpecifierPlugins,
+		httpFilterConfig:        su.ldsConfig.httpFilterConfig,
 	}
 
 	for i, rt := range su.virtualHost.Routes {
 		clusters := newWRR()
-		for cluster, wc := range rt.WeightedClusters {
-			clusters.Add(&routeCluster{
-				name:                     cluster,
-				httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
-			}, int64(wc.Weight))
-
-			// Initialize entries in cs.clusters map, creating entries in
-			// r.activeClusters as necessary. Set to zero as they will be
-			// incremented by incRefs.
-			ci := r.activeClusters[cluster]
-			if ci == nil {
-				ci = &clusterInfo{refCount: 0}
-				r.activeClusters[cluster] = ci
-			}
-			cs.clusters[cluster] = ci
+		if rt.ClusterSpecifierPlugin != "" {
+			clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin
+			clusters.Add(&routeCluster{
+				name: clusterName,
+			}, 1)
+			r.initializeCluster(clusterName, cs)
+		} else {
+			for cluster, wc := range rt.WeightedClusters {
+				clusterName := clusterPrefix + cluster
+				clusters.Add(&routeCluster{
+					name:                     clusterName,
+					httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
+				}, int64(wc.Weight))
+				r.initializeCluster(clusterName, cs)
+			}
 		}
 		cs.routes[i].clusters = clusters
```
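Putting the two branches together, the generated service config would plausibly look like the sketch below for one CSP route and one weighted-cluster route. The policy names follow the constants in the diff, but the exact JSON shape ("children", "childPolicy") and the names "rls" and "backend" are assumptions, not taken from this PR:

```go
package main

import "fmt"

func main() {
	// Hypothetical serviceConfigJSON output: one child keyed by a prefixed
	// CSP name, one by a prefixed cluster name whose CDS config still
	// carries the prefix (to be trimmed by the CDS policy).
	const sketch = `{
  "loadBalancingConfig": [{
    "xds_cluster_manager_experimental": {
      "children": {
        "cluster_specifier_plugin:rls": {"childPolicy": [{"...": {}}]},
        "cluster:backend": {"childPolicy": [{"cds_experimental": {"cluster": "cluster:backend"}}]}
      }
    }
  }]
}`
	fmt.Println(sketch)
}
```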
```diff
@@ -397,6 +418,18 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) {
 	return cs, nil
 }
 
+// initializeCluster initializes entries in cs.clusters map, creating entries
+// in r.activeClusters as necessary. Any created entries will be set to zero
+// as they will be incremented by incRefs.
+func (r *xdsResolver) initializeCluster(clusterName string, cs *configSelector) {
+	ci := r.activeClusters[clusterName]
+	if ci == nil {
+		ci = &clusterInfo{refCount: 0}
+		r.activeClusters[clusterName] = ci
+	}
+	cs.clusters[clusterName] = ci
+}
+
 type clusterInfo struct {
 	// number of references to this cluster; accessed atomically
 	refCount int32
```

Reviewer: I think this wants to be a method on …

Author: Ah, great idea. These two comments make it much cleaner. Switched.
The second file in the diff updates the resolver's service watcher:
```diff
@@ -25,6 +25,7 @@ import (
 
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/xds/internal/clusterspecifier"
 	"google.golang.org/grpc/xds/internal/xdsclient"
 	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
```
```diff
@@ -35,6 +36,9 @@ import (
 type serviceUpdate struct {
 	// virtualHost contains routes and other configuration to route RPCs.
 	virtualHost *xdsresource.VirtualHost
+	// clusterSpecifierPlugins contain the configurations for any cluster
+	// specifier plugins emitted by the xdsclient.
+	clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig
 	// ldsConfig contains configuration that applies to all routes.
 	ldsConfig ldsConfig
 }
```

Reviewer: nit: containS (the …

Author: Ah, I see, switched.
```diff
@@ -120,7 +124,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) {
 	}
 
 		// Handle the inline RDS update as if it's from an RDS watch.
-		w.updateVirtualHostsFromRDS(*update.InlineRouteConfig)
+		w.applyRouteConfigUpdate(*update.InlineRouteConfig)
 		return
 	}
```
```diff
@@ -151,7 +155,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) {
 	w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp)
 }
 
-func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) {
+func (w *serviceUpdateWatcher) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) {
 	matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts)
 	if matchVh == nil {
 		// No matching virtual host found.
```
```diff
@@ -160,6 +164,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) {
 	}
 
 	w.lastUpdate.virtualHost = matchVh
+	w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins
 	w.serviceCb(w.lastUpdate, nil)
 }
```
|
@@ -179,7 +184,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdat | |
w.serviceCb(serviceUpdate{}, err) | ||
return | ||
} | ||
w.updateVirtualHostsFromRDS(update) | ||
w.applyRouteConfigUpdate(update) | ||
} | ||
|
||
func (w *serviceUpdateWatcher) close() { | ||
|
Reviewer: Should this be stripped by the xds cluster manager LB policy instead? That is what I was expecting we'd do... I'm not sure how other languages are implementing this, either, since it apparently wasn't specified in the design.

Author: Discussed in person; decided it would be best to strip this in the name resolver, while constructing the cluster manager LB config itself (kept the "cluster:" prefix for the name of the cluster, but stripped it for the underlying CDS policy).
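A minimal sketch of the decision described in this thread, with a hypothetical cluster name: the "cluster:" prefix stays on the cluster manager child name, and the resolver strips it before filling in the CDS policy's cluster field:

```go
package main

import (
	"fmt"
	"strings"
)

const clusterPrefix = "cluster:"

func main() {
	childName := clusterPrefix + "backend" // key used for the cluster manager child

	// Strip the prefix in the resolver while constructing the cluster
	// manager LB config, so the CDS policy queries CDS with the raw name.
	cdsCluster := strings.TrimPrefix(childName, clusterPrefix)

	fmt.Printf("child %q -> CDS cluster %q\n", childName, cdsCluster)
}
```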