Remove unnecessary informer caches #1504

Merged: 12 commits, Apr 8, 2020
cmd/controller-manager/main.go (31 changes: 18 additions, 13 deletions)
@@ -16,6 +16,7 @@ package main
import (
"context"
"flag"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
@@ -33,6 +34,7 @@
"github.com/pingcap/tidb-operator/pkg/controller/tidbinitializer"
"github.com/pingcap/tidb-operator/pkg/controller/tidbmonitor"
"github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/scheme"
"github.com/pingcap/tidb-operator/pkg/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -135,20 +137,23 @@ func main() {

var informerFactory informers.SharedInformerFactory
var kubeInformerFactory kubeinformers.SharedInformerFactory
-	if controller.ClusterScoped {
-		informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
-	} else {
-		options := []informers.SharedInformerOption{
-			informers.WithNamespace(ns),
-		}
-		informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
-
-		kubeoptions := []kubeinformers.SharedInformerOption{
-			kubeinformers.WithNamespace(ns),
-		}
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
+	labelSelector := fmt.Sprintf("%s=%s", label.ManagedByLabelKey, label.TiDBOperator)
[Inline review comment from a Contributor on the line above]
Why is this added? I guess we need to list/watch all our CRD objects, with or without this label.

[Reply from the Contributor who authored the PR]
good catch

+	options := []informers.SharedInformerOption{
+		informers.WithTweakListOptions(func(listOpts *metav1.ListOptions) {
+			listOpts.LabelSelector = labelSelector
+		}),
+	}
+	kubeoptions := []kubeinformers.SharedInformerOption{
+		kubeinformers.WithTweakListOptions(func(listOpts *metav1.ListOptions) {
+			listOpts.LabelSelector = labelSelector
+		}),
+	}
+	if !controller.ClusterScoped {
+		options = append(options, informers.WithNamespace(ns))
+		kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
+	}
+	informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
+	kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)

rl := resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
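For context on the review exchange above: WithTweakListOptions applies the selector to every List/Watch the factory issues, so any object missing the label never reaches the local cache at all, which is exactly why filtering the CRD factory was questioned. A minimal, hedged client-go sketch of the pattern (the package, function name, resync period, and label literals are illustrative assumptions; the operator itself reads the labels from its pkg/label package):

package informerutil // hypothetical package, for illustration only

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newFilteredFactory builds a shared informer factory whose informers only
// see objects carrying the managed-by label. Unlabeled objects are filtered
// server-side and never enter the cache, which shrinks the operator's memory
// footprint but also hides any unlabeled object from its listers.
func newFilteredFactory(kubeCli kubernetes.Interface, ns string, clusterScoped bool) kubeinformers.SharedInformerFactory {
	selector := fmt.Sprintf("%s=%s", "app.kubernetes.io/managed-by", "tidb-operator") // assumed literals
	opts := []kubeinformers.SharedInformerOption{
		kubeinformers.WithTweakListOptions(func(listOpts *metav1.ListOptions) {
			listOpts.LabelSelector = selector // applied to every List/Watch this factory issues
		}),
	}
	if !clusterScoped {
		opts = append(opts, kubeinformers.WithNamespace(ns)) // single-namespace deployments
	}
	return kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, 30*time.Second, opts...)
}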
pkg/backup/backup/backup_cleaner.go (10 changes: 5 additions, 5 deletions)
@@ -24,8 +24,8 @@ import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
glog "k8s.io/klog"
)

@@ -36,20 +36,20 @@ type BackupCleaner interface {

type backupCleaner struct {
statusUpdater controller.BackupConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
}

// NewBackupCleaner returns a BackupCleaner
func NewBackupCleaner(
statusUpdater controller.BackupConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface) BackupCleaner {
return &backupCleaner{
statusUpdater,
-		secretLister,
+		kubeCli,
jobLister,
jobControl,
}
@@ -112,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
ns := backup.GetNamespace()
name := backup.GetName()

-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bc.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bc.kubeCli)
if err != nil {
return nil, reason, err
}
pkg/backup/backup/backup_manager.go (13 changes: 7 additions, 6 deletions)
@@ -27,14 +27,15 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)

type backupManager struct {
backupCleaner BackupCleaner
statusUpdater controller.BackupConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
@@ -45,7 +46,7 @@
func NewBackupManager(
backupCleaner BackupCleaner,
statusUpdater controller.BackupConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
@@ -54,7 +55,7 @@
return &backupManager{
backupCleaner,
statusUpdater,
-		secretLister,
+		kubeCli,
jobLister,
jobControl,
pvcLister,
@@ -163,12 +164,12 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()

-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.kubeCli)
if err != nil {
return nil, reason, err
}

-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
@@ -252,7 +253,7 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()

-	envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.secretLister)
+	envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
pkg/backup/restore/restore_manager.go (11 changes: 6 additions, 5 deletions)
@@ -28,14 +28,15 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)

type restoreManager struct {
backupLister listers.BackupLister
statusUpdater controller.RestoreConditionUpdaterInterface
-	secretLister  corelisters.SecretLister
+	kubeCli       kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
@@ -46,7 +47,7 @@
func NewRestoreManager(
backupLister listers.BackupLister,
statusUpdater controller.RestoreConditionUpdaterInterface,
-	secretLister corelisters.SecretLister,
+	kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
@@ -55,7 +56,7 @@
return &restoreManager{
backupLister,
statusUpdater,
-		secretLister,
+		kubeCli,
jobLister,
jobControl,
pvcLister,
@@ -125,12 +126,12 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo
ns := restore.GetNamespace()
name := restore.GetName()

-	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.secretLister)
+	envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.kubeCli)
if err != nil {
return nil, reason, err
}

-	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.secretLister)
+	storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
}
pkg/backup/util/util.go (13 changes: 7 additions, 6 deletions)
@@ -22,7 +22,8 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
corev1 "k8s.io/api/core/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
)

// CheckAllKeysExistInSecret check if all keys are included in the specific secret
@@ -148,7 +149,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s
}

// GenerateStorageCertEnv generate the env info in order to access backend backup storage
-func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
var reason string
storageType := GetStorageType(provider)
@@ -159,7 +160,7 @@
return certEnv, "S3ConfigIsEmpty", errors.New("s3 config is empty")
}
s3SecretName := provider.S3.SecretName
-		secret, err := secretLister.Secrets(ns).Get(s3SecretName)
+		secret, err := kubeCli.CoreV1().Secrets(ns).Get(s3SecretName, metav1.GetOptions{})
if err != nil {
err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
return certEnv, "GetS3SecretFailed", err
@@ -180,7 +181,7 @@
return certEnv, "GcsConfigIsEmpty", errors.New("gcs config is empty")
}
gcsSecretName := provider.Gcs.SecretName
-		secret, err := secretLister.Secrets(ns).Get(gcsSecretName)
+		secret, err := kubeCli.CoreV1().Secrets(ns).Get(gcsSecretName, metav1.GetOptions{})
if err != nil {
err := fmt.Errorf("get gcs secret %s/%s failed, err: %v", ns, gcsSecretName, err)
return certEnv, "GetGcsSecretFailed", err
@@ -205,9 +206,9 @@
}

// GenerateTidbPasswordEnv generate the password EnvVar
-func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
-	secret, err := secretLister.Secrets(ns).Get(tidbSecretName)
+	secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err)
return certEnv, "GetTidbSecretFailed", err
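The thread running through all of the changes above: Secrets are now fetched on demand from the API server instead of from an informer-backed lister, so the operator no longer keeps every visible Secret in memory. A hedged sketch of the pattern (the helper name is hypothetical; the Get signature matches the context-free client-go API this PR builds against):

// getSecretDirect fetches a Secret straight from the API server. Unlike a
// SecretLister it needs no informer cache behind it; the cost is one API
// round trip per call instead of a local cache lookup.
func getSecretDirect(kubeCli kubernetes.Interface, ns, name string) (*corev1.Secret, error) {
	return kubeCli.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{})
}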
pkg/controller/backup/backup_controller.go (5 changes: 2 additions, 3 deletions)
@@ -70,11 +70,10 @@ func NewController(
backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
-	secretInformer := kubeInformerFactory.Core().V1().Secrets()
statusUpdater := controller.NewRealBackupConditionUpdater(cli, backupInformer.Lister(), recorder)
jobControl := controller.NewRealJobControl(kubeCli, recorder)
pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder)
-	backupCleaner := backup.NewBackupCleaner(statusUpdater, secretInformer.Lister(), jobInformer.Lister(), jobControl)
+	backupCleaner := backup.NewBackupCleaner(statusUpdater, kubeCli, jobInformer.Lister(), jobControl)

bkc := &Controller{
kubeClient: kubeCli,
@@ -84,7 +83,7 @@
backup.NewBackupManager(
backupCleaner,
statusUpdater,
-			secretInformer.Lister(),
+			kubeCli,
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
pkg/controller/configmap_control.go (8 changes: 1 addition, 7 deletions)
@@ -23,7 +23,6 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
-	corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
@@ -44,19 +43,16 @@
type realConfigMapControl struct {
client client.Client
kubeCli kubernetes.Interface
-	cmLister corelisters.ConfigMapLister
recorder record.EventRecorder
}

// NewRealSecretControl creates a new SecretControlInterface
func NewRealConfigMapControl(
kubeCli kubernetes.Interface,
-	cmLister corelisters.ConfigMapLister,
recorder record.EventRecorder,
) ConfigMapControlInterface {
return &realConfigMapControl{
kubeCli: kubeCli,
-		cmLister: cmLister,
recorder: recorder,
}
}
@@ -81,7 +77,7 @@ func (cc *realConfigMapControl) UpdateConfigMap(owner runtime.Object, cm *corev1
return nil
}

-		if updated, err := cc.cmLister.ConfigMaps(cm.Namespace).Get(cmName); err != nil {
+		if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
utilruntime.HandleError(fmt.Errorf("error getting updated ConfigMap %s/%s from lister: %v", ns, cmName, err))
} else {
cm = updated.DeepCopy()
@@ -125,7 +121,6 @@ var _ ConfigMapControlInterface = &realConfigMapControl{}
// NewFakeConfigMapControl returns a FakeConfigMapControl
func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeConfigMapControl {
return &FakeConfigMapControl{
-		cmInformer.Lister(),
cmInformer.Informer().GetIndexer(),
RequestTracker{},
RequestTracker{},
@@ -135,7 +130,6 @@

// FakeConfigMapControl is a fake ConfigMapControlInterface
type FakeConfigMapControl struct {
-	CmLister  corelisters.ConfigMapLister
CmIndexer cache.Indexer
createConfigMapTracker RequestTracker
updateConfigMapTracker RequestTracker
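UpdateConfigMap keeps its conflict-retry loop; only the source of the fresh copy changes, from the now-removed lister cache to a live API read. A simplified sketch of the assumed shape of that loop, reusing identifiers from the file above (not the verbatim method):

// On a conflict, re-read the live ConfigMap from the API server and retry
// the update against the latest revision.
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
	_, updateErr := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
	if updateErr == nil {
		return nil
	}
	if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cm.Name, metav1.GetOptions{}); err == nil {
		cm = updated.DeepCopy() // retry with the latest version
	}
	return updateErr
})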
pkg/controller/configmap_control_test.go (18 changes: 6 additions, 12 deletions)
@@ -23,9 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
-	corelisters "k8s.io/client-go/listers/core/v1"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
)

@@ -35,7 +33,7 @@ func TestConfigMapControlCreatesConfigMaps(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@@ -54,7 +52,7 @@ func TestConfigMapControlCreatesConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@@ -73,7 +71,7 @@ func TestConfigMapControlUpdateConfigMap(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
return true, update.GetObject(), nil
@@ -94,13 +92,9 @@ func TestConfigMapControlUpdateConfigMapConflictSuccess(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
-	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
oldcm := newConfigMap()
oldcm.Data["file"] = "test2"
-	err := indexer.Add(oldcm)
-	g.Expect(err).To(Succeed())
-	cmLister := corelisters.NewConfigMapLister(indexer)
-	control := NewRealConfigMapControl(fakeClient, cmLister, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
conflict := false
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
@@ -125,7 +119,7 @@ func TestConfigMapControlDeleteConfigMap(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
@@ -142,7 +136,7 @@ func TestConfigMapControlDeleteConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
-	control := NewRealConfigMapControl(fakeClient, nil, recorder)
+	control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
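With the lister parameter gone from NewRealConfigMapControl, the tests above no longer need an indexer; the conflict path can be driven entirely through reactors on the fake clientset. A hedged sketch of that wiring (reactor shape assumed, names taken from the test file):

fakeClient := &fake.Clientset{}
control := NewRealConfigMapControl(fakeClient, recorder)
// Serve the "live" object that the retry loop re-reads after a conflict.
fakeClient.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
	return true, oldcm, nil
})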