chore: endpointslice controller #574
Changes from all commits
@@ -49,7 +49,7 @@ Tips: The failure caused by empty upstream nodes is a limitation of Apache APISI

6. What is the retry rule of `apisix-ingress-controller`?

-If an error occurs during the process of `apisix-ingress-controller` parsing CRD and distributing the configuration to APISIX, a retry will be triggered.
+If an error occurs duriREADME.mdng the process of `apisix-ingress-controller` parsing CRD and distributing the configuration to APISIX, a retry will be triggered.

Review comment: Misspell

The delayed retry method is adopted. After the first failure, it is retried once per second. After 5 retries are triggered, the slow retry strategy will be enabled, and the retry will be performed every 1 minute until it succeeds.
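(Aside: the cadence described in that answer, 1 second for the first few attempts and 1 minute afterwards, matches the behaviour of client-go's fast-slow rate limiter. The snippet below only illustrates that cadence with a made-up work-item key; it is not taken from the controller's code.)

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Fast-slow limiter: 1s delay for an item's first 5 failures,
	// then 1m for every later failure, until Forget(item) is called.
	limiter := workqueue.NewItemFastSlowRateLimiter(time.Second, time.Minute, 5)

	key := "default/httpbin" // hypothetical work-item key
	for i := 0; i < 7; i++ {
		fmt.Printf("retry %d delayed by %v\n", i+1, limiter.When(key))
	}
	limiter.Forget(key) // a successful sync resets the per-item counter
}
```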
@@ -21,8 +21,11 @@ import (
    "sync"
    "time"

+   apisixcache "github.com/apache/apisix-ingress-controller/pkg/apisix/cache"
+   configv1 "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/apis/config/v1"
    "go.uber.org/zap"
    v1 "k8s.io/api/core/v1"
+   k8serrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -107,10 +110,11 @@ type Controller struct {
    apisixConsumerLister listersv2alpha1.ApisixConsumerLister

    // resource controllers
-   podController       *podController
-   endpointsController *endpointsController
-   ingressController   *ingressController
-   secretController    *secretController
+   podController           *podController
+   endpointsController     *endpointsController
+   endpointSliceController *endpointSliceController
+   ingressController       *ingressController
+   secretController        *secretController

    apisixUpstreamController *apisixUpstreamController
    apisixRouteController    *apisixRouteController
@@ -233,8 +237,12 @@ func (c *Controller) initWhenStartLeading() {
    c.apisixTlsInformer = apisixFactory.Apisix().V1().ApisixTlses().Informer()
    c.apisixConsumerInformer = apisixFactory.Apisix().V2alpha1().ApisixConsumers().Informer()

+   if c.cfg.Kubernetes.WatchEndpointSlices {
+       c.endpointSliceController = c.newEndpointSliceController()
+   } else {
+       c.endpointsController = c.newEndpointsController()
+   }
    c.podController = c.newPodController()
-   c.endpointsController = c.newEndpointsController()
    c.apisixUpstreamController = c.newApisixUpstreamController()
    c.ingressController = c.newIngressController()
    c.apisixRouteController = c.newApisixRouteController()
@@ -425,7 +433,11 @@ func (c *Controller) run(ctx context.Context) {
        c.podController.run(ctx)
    })
    c.goAttach(func() {
-       c.endpointsController.run(ctx)
+       if c.cfg.Kubernetes.WatchEndpointSlices {
+           c.endpointSliceController.run(ctx)
+       } else {
+           c.endpointsController.run(ctx)
+       }
    })
    c.goAttach(func() {
        c.apisixUpstreamController.run(ctx)
@@ -504,6 +516,85 @@ func (c *Controller) syncConsumer(ctx context.Context, consumer *apisixv1.Consum
    }
    return
}

+func (c *Controller) syncEndpoint(ctx context.Context, ep kube.Endpoint) error {
Review thread:
- Why write sync in the controller?
- Because these logics can be reused by both the endpoint controller and the endpointslice controller.
- I mean, putting them in another file may be better, not in the controller.
+    namespace := ep.Namespace()
+    svcName := ep.ServiceName()
+    svc, err := c.svcLister.Services(ep.Namespace()).Get(svcName)
+    if err != nil {
+        if k8serrors.IsNotFound(err) {
+            log.Infof("service %s/%s not found", ep.Namespace(), svcName)
+            return nil
+        }
+        log.Errorf("failed to get service %s/%s: %s", ep.Namespace(), svcName, err)
+        return err
+    }
+    var subsets []configv1.ApisixUpstreamSubset
+    subsets = append(subsets, configv1.ApisixUpstreamSubset{})
+    au, err := c.apisixUpstreamLister.ApisixUpstreams(namespace).Get(svcName)
+    if err != nil {
+        if !k8serrors.IsNotFound(err) {
+            log.Errorf("failed to get ApisixUpstream %s/%s: %s", ep.Namespace(), svcName, err)
+            return err
+        }
+    } else if len(au.Spec.Subsets) > 0 {
+        subsets = append(subsets, au.Spec.Subsets...)
+    }
+
+    clusters := c.apisix.ListClusters()
+    for _, port := range svc.Spec.Ports {
+        for _, subset := range subsets {
+            nodes, err := c.translator.TranslateUpstreamNodes(ep, port.Port, subset.Labels)
+            if err != nil {
+                log.Errorw("failed to translate upstream nodes",
+                    zap.Error(err),
+                    zap.Any("endpoints", ep),
+                    zap.Int32("port", port.Port),
+                )
+            }
+            name := apisixv1.ComposeUpstreamName(namespace, svcName, subset.Name, port.Port)
+            for _, cluster := range clusters {
+                if err := c.syncUpstreamNodesChangeToCluster(ctx, cluster, nodes, name); err != nil {
+                    return err
+                }
+            }
+        }
+    }
+    return nil
+}
+
+func (c *Controller) syncUpstreamNodesChangeToCluster(ctx context.Context, cluster apisix.Cluster, nodes apisixv1.UpstreamNodes, upsName string) error {
+    upstream, err := cluster.Upstream().Get(ctx, upsName)
+    if err != nil {
+        if err == apisixcache.ErrNotFound {
+            log.Warnw("upstream is not referenced",
+                zap.String("cluster", cluster.String()),
+                zap.String("upstream", upsName),
+            )
+            return nil
+        } else {
+            log.Errorw("failed to get upstream",
+                zap.String("upstream", upsName),
+                zap.String("cluster", cluster.String()),
+                zap.Error(err),
+            )
+            return err
+        }
+    }
+
+    upstream.Nodes = nodes
+
+    log.Debugw("upstream binds new nodes",
+        zap.Any("upstream", upstream),
+        zap.String("cluster", cluster.String()),
+    )
+
+    updated := &manifest{
+        upstreams: []*apisixv1.Upstream{upstream},
+    }
+    return c.syncManifests(ctx, nil, updated, nil)
+}

func (c *Controller) checkClusterHealth(ctx context.Context, cancelFunc context.CancelFunc) {
    defer cancelFunc()
    for {
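The thread above turns on the fact that both the Endpoints and the EndpointSlice controllers feed the same kube.Endpoint abstraction into syncEndpoint. The sketch below shows roughly what such an adapter can look like; the method set is inferred only from the calls visible in this diff (Namespace, ServiceName), so the real interface in pkg/kube almost certainly carries more methods and may be organised differently.

```go
// Sketch of an adapter that lets one sync path serve both Endpoints and
// EndpointSlice objects. The method names are inferred from the calls made
// in syncEndpoint above; the actual kube.Endpoint interface may differ.
package kube

import (
	corev1 "k8s.io/api/core/v1"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

// Endpoint abstracts over the two endpoint representations.
type Endpoint interface {
	Namespace() string
	ServiceName() string
}

type endpoints struct{ ep *corev1.Endpoints }

func (e *endpoints) Namespace() string   { return e.ep.Namespace }
func (e *endpoints) ServiceName() string { return e.ep.Name } // Endpoints share the Service's name

type endpointSlice struct{ es *discoveryv1beta1.EndpointSlice }

func (e *endpointSlice) Namespace() string { return e.es.Namespace }
func (e *endpointSlice) ServiceName() string {
	// EndpointSlices record their owning Service in a well-known label.
	return e.es.Labels[discoveryv1beta1.LabelServiceName]
}

// NewEndpoint and NewEndpointSlice wrap the concrete objects.
func NewEndpoint(ep *corev1.Endpoints) Endpoint              { return &endpoints{ep: ep} }
func NewEndpointSlice(es *discoveryv1beta1.EndpointSlice) Endpoint { return &endpointSlice{es: es} }
```

Wrapping both resource types behind one interface is what lets the translate-and-sync pipeline stay unchanged regardless of which watcher is enabled.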
Review comment: EndpointSlice was introduced in Kubernetes v1.16.
https://kubernetes.io/blog/2019/09/18/kubernetes-1-16-release-announcement/#introducing-endpoint-slices
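That version note is why the controller keeps the plain Endpoints path behind the WatchEndpointSlices switch: clusters older than v1.16 do not serve the discovery.k8s.io API at all. For orientation, a minimal, self-contained client-go sketch that watches EndpointSlices (the kubeconfig path and resync period are arbitrary choices for the sketch, not the controller's configuration) could look like this:

```go
package main

import (
	"fmt"
	"time"

	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the local kubeconfig (an assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// EndpointSlice is served by discovery.k8s.io/v1beta1 on v1.16+ clusters.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Discovery().V1beta1().EndpointSlices().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			es := obj.(*discoveryv1beta1.EndpointSlice)
			// The owning Service is recorded in the kubernetes.io/service-name label,
			// which is how a slice event is mapped back to an upstream.
			fmt.Printf("slice %s/%s for service %q\n",
				es.Namespace, es.Name, es.Labels[discoveryv1beta1.LabelServiceName])
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, informer.HasSynced)
	select {} // block forever; a real controller would run workers here
}
```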