Skip to content

Commit

Permalink
factory: strip Managed Fields to reduce memory usage
Browse files Browse the repository at this point in the history
We don't care about them, so there is no reason to cache them
and use a ton of memory.

Inspired by kubernetes/kubernetes#118455

Signed-off-by: Dan Williams <dcbw@redhat.com>
  • Loading branch information
dcbw committed Nov 15, 2023
1 parent ac26e9d commit 93a9bac
Showing 1 changed file with 26 additions and 1 deletion.
27 changes: 26 additions & 1 deletion go-controller/pkg/factory/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package factory

import (
"context"
"encoding/json"
"fmt"
"reflect"
"sync/atomic"
Expand Down Expand Up @@ -57,6 +58,7 @@ import (
kapi "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1"
knet "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
Expand Down Expand Up @@ -193,14 +195,37 @@ func NewMasterWatchFactory(ovnClientset *util.OVNMasterClientset) (*WatchFactory

// NewOVNKubeControllerWatchFactory initializes a new watch factory for the ovnkube controller process
func NewOVNKubeControllerWatchFactory(ovnClientset *util.OVNKubeControllerClientset) (*WatchFactory, error) {
// Informer transform to trim ManagedFields for memory efficiency.
trim := func(obj interface{}) (interface{}, error) {
if accessor, err := meta.Accessor(obj); err == nil {
accessor.SetManagedFields(nil)
}
if pod, ok := obj.(*kapi.Pod); ok {
pod.Spec.Volumes = []kapi.Volume{}
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Command = nil
pod.Spec.Containers[i].Args = nil
pod.Spec.Containers[i].Env = nil
pod.Spec.Containers[i].VolumeMounts = nil
}
} else if node, ok := obj.(*kapi.Node); ok {
node.Status.Images = nil
node.Status.VolumesInUse = nil
node.Status.VolumesAttached = nil
node.Status.Capacity = nil
node.Status.Allocatable = nil
}
return obj, nil
}

// resync time is 12 hours, none of the resources being watched in ovn-kubernetes have
// any race condition where a resync may be required e.g. cni executable on node watching for
// events on pods and assuming that an 'ADD' event will contain the annotations put in by
// ovnkube master (currently, it is just a 'get' loop)
// the downside of making it tight (like 10 minutes) is needless spinning on all resources
// However, AddEventHandlerWithResyncPeriod can specify a per handler resync period
wf := &WatchFactory{
iFactory: informerfactory.NewSharedInformerFactory(ovnClientset.KubeClient, resyncInterval),
iFactory: informerfactory.NewSharedInformerFactoryWithOptions(ovnClientset.KubeClient, resyncInterval, informerfactory.WithTransform(trim)),
anpFactory: anpinformerfactory.NewSharedInformerFactory(ovnClientset.ANPClient, resyncInterval),
eipFactory: egressipinformerfactory.NewSharedInformerFactory(ovnClientset.EgressIPClient, resyncInterval),
efFactory: egressfirewallinformerfactory.NewSharedInformerFactory(ovnClientset.EgressFirewallClient, resyncInterval),
Expand Down

0 comments on commit 93a9bac

Please sign in to comment.