From 2bf5612bed0ca8047fb003e7a9964d2548c4f457 Mon Sep 17 00:00:00 2001
From: Aylei
Date: Wed, 8 Jan 2020 12:45:57 +0800
Subject: [PATCH 1/3] Remove informer cache for configmap and secret, only
 watch for resources managed by tidb-operator

Signed-off-by: Aylei
---
 cmd/controller-manager/main.go                 | 31 +++++++++++--------
 pkg/backup/backup/backup_cleaner.go            | 10 +++---
 pkg/backup/backup/backup_manager.go            | 13 ++++----
 pkg/backup/restore/restore_manager.go          | 11 ++++---
 pkg/backup/util/util.go                        | 13 ++++----
 pkg/controller/backup/backup_controller.go     |  5 ++-
 pkg/controller/configmap_control.go            |  8 +----
 pkg/controller/configmap_control_test.go       | 18 ++++-------
 pkg/controller/restore/restore_controller.go   |  3 +-
 pkg/controller/secret_control.go               | 14 +++------
 .../tidbcluster/tidb_cluster_controller.go     |  8 ++---
 pkg/manager/member/pd_member_manager_test.go   |  3 +-
 pkg/manager/member/pump_member_manager.go      |  3 --
 .../member/pump_member_manager_test.go         |  1 -
 pkg/manager/member/tidb_member_manager.go      |  2 --
 .../member/tidb_member_manager_test.go         |  2 +-
 16 files changed, 61 insertions(+), 84 deletions(-)

diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go
index 88deee6921..3bd0f5b556 100644
--- a/cmd/controller-manager/main.go
+++ b/cmd/controller-manager/main.go
@@ -16,6 +16,7 @@ package main
 import (
 	"context"
 	"flag"
+	"fmt"
 	"net/http"
 	_ "net/http/pprof"
 	"os"
@@ -33,6 +34,7 @@ import (
 	"github.com/pingcap/tidb-operator/pkg/controller/tidbinitializer"
 	"github.com/pingcap/tidb-operator/pkg/controller/tidbmonitor"
 	"github.com/pingcap/tidb-operator/pkg/features"
+	"github.com/pingcap/tidb-operator/pkg/label"
 	"github.com/pingcap/tidb-operator/pkg/scheme"
 	"github.com/pingcap/tidb-operator/pkg/version"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -135,20 +137,23 @@ func main() {
 	var informerFactory informers.SharedInformerFactory
 	var kubeInformerFactory kubeinformers.SharedInformerFactory
-	if controller.ClusterScoped {
-		informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
-	} else {
-		options := []informers.SharedInformerOption{
-			informers.WithNamespace(ns),
-		}
-		informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
-
-		kubeoptions := []kubeinformers.SharedInformerOption{
-			kubeinformers.WithNamespace(ns),
-		}
-		kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
+	labelSelector := fmt.Sprintf("%s=%s", label.ManagedByLabelKey, label.TiDBOperator)
+	options := []informers.SharedInformerOption{
+		informers.WithTweakListOptions(func(listOpts *metav1.ListOptions) {
+			listOpts.LabelSelector = labelSelector
+		}),
+	}
+	kubeoptions := []kubeinformers.SharedInformerOption{
+		kubeinformers.WithTweakListOptions(func(listOpts *metav1.ListOptions) {
+			listOpts.LabelSelector = labelSelector
+		}),
+	}
+	if !controller.ClusterScoped {
+		options = append(options, informers.WithNamespace(ns))
+		kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
 	}
+	informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
+	kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
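
// --- Editorial aside (not part of the patch): a minimal, self-contained
// sketch of what the tweak above buys. Every informer spawned by a factory
// built this way appends the label selector to its List and Watch calls, so
// objects without the label never enter the shared cache. The literal label
// key/value stand in for label.ManagedByLabelKey and label.TiDBOperator, and
// the 30s resync stands in for controller.ResyncDuration; both are
// assumptions for illustration.
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func newManagedOnlyFactory(kubeCli kubernetes.Interface) kubeinformers.SharedInformerFactory {
	selector := fmt.Sprintf("%s=%s", "app.kubernetes.io/managed-by", "tidb-operator")
	return kubeinformers.NewSharedInformerFactoryWithOptions(
		kubeCli,
		30*time.Second,
		kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			// Applied to every List/Watch this factory issues, e.g.
			// factory.Core().V1().Secrets() now caches only labeled Secrets.
			opts.LabelSelector = selector
		}),
	)
}
// --- End of aside; the patch continues below.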
rl := resourcelock.EndpointsLock{ EndpointsMeta: metav1.ObjectMeta{ diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go index 4b90d5874b..e4fa612e3c 100644 --- a/pkg/backup/backup/backup_cleaner.go +++ b/pkg/backup/backup/backup_cleaner.go @@ -24,8 +24,8 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" batchlisters "k8s.io/client-go/listers/batch/v1" - corelisters "k8s.io/client-go/listers/core/v1" glog "k8s.io/klog" ) @@ -36,7 +36,7 @@ type BackupCleaner interface { type backupCleaner struct { statusUpdater controller.BackupConditionUpdaterInterface - secretLister corelisters.SecretLister + kubeCli kubernetes.Interface jobLister batchlisters.JobLister jobControl controller.JobControlInterface } @@ -44,12 +44,12 @@ type backupCleaner struct { // NewBackupCleaner returns a BackupCleaner func NewBackupCleaner( statusUpdater controller.BackupConditionUpdaterInterface, - secretLister corelisters.SecretLister, + kubeCli kubernetes.Interface, jobLister batchlisters.JobLister, jobControl controller.JobControlInterface) BackupCleaner { return &backupCleaner{ statusUpdater, - secretLister, + kubeCli, jobLister, jobControl, } @@ -112,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st ns := backup.GetNamespace() name := backup.GetName() - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bc.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bc.kubeCli) if err != nil { return nil, reason, err } diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go index a8760d8fd5..c1ab58fcab 100644 --- a/pkg/backup/backup/backup_manager.go +++ b/pkg/backup/backup/backup_manager.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" batchlisters "k8s.io/client-go/listers/batch/v1" corelisters "k8s.io/client-go/listers/core/v1" ) @@ -34,7 +35,7 @@ import ( type backupManager struct { backupCleaner BackupCleaner statusUpdater controller.BackupConditionUpdaterInterface - secretLister corelisters.SecretLister + kubeCli kubernetes.Interface jobLister batchlisters.JobLister jobControl controller.JobControlInterface pvcLister corelisters.PersistentVolumeClaimLister @@ -45,7 +46,7 @@ type backupManager struct { func NewBackupManager( backupCleaner BackupCleaner, statusUpdater controller.BackupConditionUpdaterInterface, - secretLister corelisters.SecretLister, + kubeCli kubernetes.Interface, jobLister batchlisters.JobLister, jobControl controller.JobControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, @@ -54,7 +55,7 @@ func NewBackupManager( return &backupManager{ backupCleaner, statusUpdater, - secretLister, + kubeCli, jobLister, jobControl, pvcLister, @@ -163,12 +164,12 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s ns := backup.GetNamespace() name := backup.GetName() - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, 
bm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err) } @@ -252,7 +253,7 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s ns := backup.GetNamespace() name := backup.GetName() - envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.secretLister) + envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err) } diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go index 3431d354d0..b34106b600 100644 --- a/pkg/backup/restore/restore_manager.go +++ b/pkg/backup/restore/restore_manager.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" batchlisters "k8s.io/client-go/listers/batch/v1" corelisters "k8s.io/client-go/listers/core/v1" ) @@ -35,7 +36,7 @@ import ( type restoreManager struct { backupLister listers.BackupLister statusUpdater controller.RestoreConditionUpdaterInterface - secretLister corelisters.SecretLister + kubeCli kubernetes.Interface jobLister batchlisters.JobLister jobControl controller.JobControlInterface pvcLister corelisters.PersistentVolumeClaimLister @@ -46,7 +47,7 @@ type restoreManager struct { func NewRestoreManager( backupLister listers.BackupLister, statusUpdater controller.RestoreConditionUpdaterInterface, - secretLister corelisters.SecretLister, + kubeCli kubernetes.Interface, jobLister batchlisters.JobLister, jobControl controller.JobControlInterface, pvcLister corelisters.PersistentVolumeClaimLister, @@ -55,7 +56,7 @@ func NewRestoreManager( return &restoreManager{ backupLister, statusUpdater, - secretLister, + kubeCli, jobLister, jobControl, pvcLister, @@ -125,12 +126,12 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo ns := restore.GetNamespace() name := restore.GetName() - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err) } diff --git a/pkg/backup/util/util.go b/pkg/backup/util/util.go index a7aea37e9a..5505724ca7 100644 --- a/pkg/backup/util/util.go +++ b/pkg/backup/util/util.go @@ -22,7 +22,8 @@ import ( "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" "github.com/pingcap/tidb-operator/pkg/backup/constants" corev1 "k8s.io/api/core/v1" - corelisters "k8s.io/client-go/listers/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) // CheckAllKeysExistInSecret check if all keys are included in the specific secret @@ -148,7 +149,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s } // GenerateStorageCertEnv generate the env info in order to access backend backup storage -func GenerateStorageCertEnv(ns string, provider 
v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) { +func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) { var certEnv []corev1.EnvVar var reason string storageType := GetStorageType(provider) @@ -159,7 +160,7 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret return certEnv, "S3ConfigIsEmpty", errors.New("s3 config is empty") } s3SecretName := provider.S3.SecretName - secret, err := secretLister.Secrets(ns).Get(s3SecretName) + secret, err := kubeCli.CoreV1().Secrets(ns).Get(s3SecretName, metav1.GetOptions{}) if err != nil { err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err) return certEnv, "GetS3SecretFailed", err @@ -180,7 +181,7 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret return certEnv, "GcsConfigIsEmpty", errors.New("gcs config is empty") } gcsSecretName := provider.Gcs.SecretName - secret, err := secretLister.Secrets(ns).Get(gcsSecretName) + secret, err := kubeCli.CoreV1().Secrets(ns).Get(gcsSecretName, metav1.GetOptions{}) if err != nil { err := fmt.Errorf("get gcs secret %s/%s failed, err: %v", ns, gcsSecretName, err) return certEnv, "GetGcsSecretFailed", err @@ -205,9 +206,9 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret } // GenerateTidbPasswordEnv generate the password EnvVar -func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) { +func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) { var certEnv []corev1.EnvVar - secret, err := secretLister.Secrets(ns).Get(tidbSecretName) + secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err) return certEnv, "GetTidbSecretFailed", err diff --git a/pkg/controller/backup/backup_controller.go b/pkg/controller/backup/backup_controller.go index dd2ec48e1a..f6f5ac7e0f 100644 --- a/pkg/controller/backup/backup_controller.go +++ b/pkg/controller/backup/backup_controller.go @@ -70,11 +70,10 @@ func NewController( backupInformer := informerFactory.Pingcap().V1alpha1().Backups() jobInformer := kubeInformerFactory.Batch().V1().Jobs() pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() - secretInformer := kubeInformerFactory.Core().V1().Secrets() statusUpdater := controller.NewRealBackupConditionUpdater(cli, backupInformer.Lister(), recorder) jobControl := controller.NewRealJobControl(kubeCli, recorder) pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder) - backupCleaner := backup.NewBackupCleaner(statusUpdater, secretInformer.Lister(), jobInformer.Lister(), jobControl) + backupCleaner := backup.NewBackupCleaner(statusUpdater, kubeCli, jobInformer.Lister(), jobControl) bkc := &Controller{ kubeClient: kubeCli, @@ -84,7 +83,7 @@ func NewController( backup.NewBackupManager( backupCleaner, statusUpdater, - secretInformer.Lister(), + kubeCli, jobInformer.Lister(), jobControl, pvcInformer.Lister(), diff --git a/pkg/controller/configmap_control.go b/pkg/controller/configmap_control.go index 385235b881..a883767d58 100644 --- a/pkg/controller/configmap_control.go +++ b/pkg/controller/configmap_control.go @@ -23,7 +23,6 @@ import ( utilruntime 
"k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" @@ -44,19 +43,16 @@ type ConfigMapControlInterface interface { type realConfigMapControl struct { client client.Client kubeCli kubernetes.Interface - cmLister corelisters.ConfigMapLister recorder record.EventRecorder } // NewRealSecretControl creates a new SecretControlInterface func NewRealConfigMapControl( kubeCli kubernetes.Interface, - cmLister corelisters.ConfigMapLister, recorder record.EventRecorder, ) ConfigMapControlInterface { return &realConfigMapControl{ kubeCli: kubeCli, - cmLister: cmLister, recorder: recorder, } } @@ -81,7 +77,7 @@ func (cc *realConfigMapControl) UpdateConfigMap(owner runtime.Object, cm *corev1 return nil } - if updated, err := cc.cmLister.ConfigMaps(cm.Namespace).Get(cmName); err != nil { + if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cmName, metav1.GetOptions{}); err != nil { utilruntime.HandleError(fmt.Errorf("error getting updated ConfigMap %s/%s from lister: %v", ns, cmName, err)) } else { cm = updated.DeepCopy() @@ -125,7 +121,6 @@ var _ ConfigMapControlInterface = &realConfigMapControl{} // NewFakeConfigMapControl returns a FakeConfigMapControl func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeConfigMapControl { return &FakeConfigMapControl{ - cmInformer.Lister(), cmInformer.Informer().GetIndexer(), RequestTracker{}, RequestTracker{}, @@ -135,7 +130,6 @@ func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeCo // FakeConfigMapControl is a fake ConfigMapControlInterface type FakeConfigMapControl struct { - CmLister corelisters.ConfigMapLister CmIndexer cache.Indexer createConfigMapTracker RequestTracker updateConfigMapTracker RequestTracker diff --git a/pkg/controller/configmap_control_test.go b/pkg/controller/configmap_control_test.go index 7fae063b12..5eaf45ba4c 100644 --- a/pkg/controller/configmap_control_test.go +++ b/pkg/controller/configmap_control_test.go @@ -23,9 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" - corelisters "k8s.io/client-go/listers/core/v1" core "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" ) @@ -35,7 +33,7 @@ func TestConfigMapControlCreatesConfigMaps(t *testing.T) { tc := newTidbCluster() cm := newConfigMap() fakeClient := &fake.Clientset{} - control := NewRealConfigMapControl(fakeClient, nil, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) { create := action.(core.CreateAction) return true, create.GetObject(), nil @@ -54,7 +52,7 @@ func TestConfigMapControlCreatesConfigMapFailed(t *testing.T) { tc := newTidbCluster() cm := newConfigMap() fakeClient := &fake.Clientset{} - control := NewRealConfigMapControl(fakeClient, nil, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewInternalError(errors.New("API server down")) }) @@ -73,7 +71,7 @@ func TestConfigMapControlUpdateConfigMap(t *testing.T) { cm := newConfigMap() cm.Data["file"] = "test" fakeClient := &fake.Clientset{} - control := 
NewRealConfigMapControl(fakeClient, nil, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) return true, update.GetObject(), nil @@ -94,13 +92,9 @@ func TestConfigMapControlUpdateConfigMapConflictSuccess(t *testing.T) { cm := newConfigMap() cm.Data["file"] = "test" fakeClient := &fake.Clientset{} - indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) oldcm := newConfigMap() oldcm.Data["file"] = "test2" - err := indexer.Add(oldcm) - g.Expect(err).To(Succeed()) - cmLister := corelisters.NewConfigMapLister(indexer) - control := NewRealConfigMapControl(fakeClient, cmLister, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) conflict := false fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) { update := action.(core.UpdateAction) @@ -125,7 +119,7 @@ func TestConfigMapControlDeleteConfigMap(t *testing.T) { tc := newTidbCluster() cm := newConfigMap() fakeClient := &fake.Clientset{} - control := NewRealConfigMapControl(fakeClient, nil, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) { return true, nil, nil }) @@ -142,7 +136,7 @@ func TestConfigMapControlDeleteConfigMapFailed(t *testing.T) { tc := newTidbCluster() cm := newConfigMap() fakeClient := &fake.Clientset{} - control := NewRealConfigMapControl(fakeClient, nil, recorder) + control := NewRealConfigMapControl(fakeClient, recorder) fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) { return true, nil, apierrors.NewInternalError(errors.New("API server down")) }) diff --git a/pkg/controller/restore/restore_controller.go b/pkg/controller/restore/restore_controller.go index e3930de37a..39cef67c13 100644 --- a/pkg/controller/restore/restore_controller.go +++ b/pkg/controller/restore/restore_controller.go @@ -71,7 +71,6 @@ func NewController( backupInformer := informerFactory.Pingcap().V1alpha1().Backups() jobInformer := kubeInformerFactory.Batch().V1().Jobs() pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims() - secretInformer := kubeInformerFactory.Core().V1().Secrets() statusUpdater := controller.NewRealRestoreConditionUpdater(cli, restoreInformer.Lister(), recorder) jobControl := controller.NewRealJobControl(kubeCli, recorder) pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder) @@ -83,7 +82,7 @@ func NewController( restore.NewRestoreManager( backupInformer.Lister(), statusUpdater, - secretInformer.Lister(), + kubeCli, jobInformer.Lister(), jobControl, pvcInformer.Lister(), diff --git a/pkg/controller/secret_control.go b/pkg/controller/secret_control.go index 7392c4f9ea..a7aa6529f4 100644 --- a/pkg/controller/secret_control.go +++ b/pkg/controller/secret_control.go @@ -25,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" glog "k8s.io/klog" ) @@ -37,18 +36,15 @@ type SecretControlInterface interface { } type realSecretControl struct { - kubeCli kubernetes.Interface - secretLister corelisters.SecretLister + kubeCli kubernetes.Interface } // NewRealSecretControl creates a new SecretControlInterface func 
NewRealSecretControl( kubeCli kubernetes.Interface, - secretLister corelisters.SecretLister, ) SecretControlInterface { return &realSecretControl{ - kubeCli: kubeCli, - secretLister: secretLister, + kubeCli: kubeCli, } } @@ -79,7 +75,7 @@ func (rsc *realSecretControl) Create(or metav1.OwnerReference, certOpts *TiDBClu // Load loads cert and key from Secret matching the name func (rsc *realSecretControl) Load(ns string, secretName string) ([]byte, []byte, error) { - secret, err := rsc.secretLister.Secrets(ns).Get(secretName) + secret, err := rsc.kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{}) if err != nil { return nil, nil, err } @@ -143,10 +139,8 @@ type FakeSecretControl struct { func NewFakeSecretControl( kubeCli kubernetes.Interface, - secretLister corelisters.SecretLister, ) SecretControlInterface { return &realSecretControl{ - kubeCli: kubeCli, - secretLister: secretLister, + kubeCli: kubeCli, } } diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index 277214bafa..c2e54f30d4 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -91,19 +91,17 @@ func NewController( podInformer := kubeInformerFactory.Core().V1().Pods() nodeInformer := kubeInformerFactory.Core().V1().Nodes() csrInformer := kubeInformerFactory.Certificates().V1beta1().CertificateSigningRequests() - secretInformer := kubeInformerFactory.Core().V1().Secrets() - cmInformer := kubeInformerFactory.Core().V1().ConfigMaps() tcControl := controller.NewRealTidbClusterControl(cli, tcInformer.Lister(), recorder) pdControl := pdapi.NewDefaultPDControl(kubeCli) tidbControl := controller.NewDefaultTiDBControl() - cmControl := controller.NewRealConfigMapControl(kubeCli, cmInformer.Lister(), recorder) + cmControl := controller.NewRealConfigMapControl(kubeCli, recorder) setControl := controller.NewRealStatefuSetControl(kubeCli, setInformer.Lister(), recorder) svcControl := controller.NewRealServiceControl(kubeCli, svcInformer.Lister(), recorder) pvControl := controller.NewRealPVControl(kubeCli, pvcInformer.Lister(), pvInformer.Lister(), recorder) pvcControl := controller.NewRealPVCControl(kubeCli, recorder, pvcInformer.Lister()) podControl := controller.NewRealPodControl(kubeCli, pdControl, podInformer.Lister(), recorder) - secControl := controller.NewRealSecretControl(kubeCli, secretInformer.Lister()) + secControl := controller.NewRealSecretControl(kubeCli) certControl := controller.NewRealCertControl(kubeCli, csrInformer.Lister(), secControl) typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder)) pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl) @@ -162,7 +160,6 @@ func NewController( setInformer.Lister(), svcInformer.Lister(), podInformer.Lister(), - cmInformer.Lister(), tidbUpgrader, autoFailover, tidbFailover, @@ -201,7 +198,6 @@ func NewController( cmControl, setInformer.Lister(), svcInformer.Lister(), - cmInformer.Lister(), podInformer.Lister(), ), mm.NewTidbDiscoveryManager(typedControl), diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go index b94263a983..2ec303e1dc 100644 --- a/pkg/manager/member/pd_member_manager_test.go +++ b/pkg/manager/member/pd_member_manager_test.go @@ -747,12 +747,11 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 
0).Core().V1().PersistentVolumeClaims() tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests() - secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets() setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) podControl := controller.NewFakePodControl(podInformer) pdControl := pdapi.NewFakePDControl(kubeCli) - secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister()) + secControl := controller.NewFakeSecretControl(kubeCli) certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl) pdScaler := NewFakePDScaler() autoFailover := true diff --git a/pkg/manager/member/pump_member_manager.go b/pkg/manager/member/pump_member_manager.go index b03f069753..62b9c4d09f 100644 --- a/pkg/manager/member/pump_member_manager.go +++ b/pkg/manager/member/pump_member_manager.go @@ -43,7 +43,6 @@ type pumpMemberManager struct { cmControl controller.ConfigMapControlInterface setLister v1.StatefulSetLister svcLister corelisters.ServiceLister - cmLister corelisters.ConfigMapLister podLister corelisters.PodLister } @@ -55,7 +54,6 @@ func NewPumpMemberManager( cmControl controller.ConfigMapControlInterface, setLister v1.StatefulSetLister, svcLister corelisters.ServiceLister, - cmLister corelisters.ConfigMapLister, podLister corelisters.PodLister) manager.Manager { return &pumpMemberManager{ setControl, @@ -64,7 +62,6 @@ func NewPumpMemberManager( cmControl, setLister, svcLister, - cmLister, podLister, } } diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go index c92e41b0de..a2401191f2 100644 --- a/pkg/manager/member/pump_member_manager_test.go +++ b/pkg/manager/member/pump_member_manager_test.go @@ -455,7 +455,6 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak cmControl, setInformer.Lister(), svcInformer.Lister(), - cmInformer.Lister(), podInformer.Lister(), } controls := &pumpFakeControls{ diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index d99995b25e..da525cad34 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -74,7 +74,6 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, setLister v1.StatefulSetLister, svcLister corelisters.ServiceLister, podLister corelisters.PodLister, - cmLister corelisters.ConfigMapLister, tidbUpgrader Upgrader, autoFailover bool, tidbFailover Failover) manager.Manager { @@ -87,7 +86,6 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface, setLister: setLister, svcLister: svcLister, podLister: podLister, - cmLister: cmLister, tidbUpgrader: tidbUpgrader, autoFailover: autoFailover, tidbFailover: tidbFailover, diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index a6b973d102..8a7955bea3 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -756,7 +756,7 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() setControl := 
controller.NewFakeStatefulSetControl(setInformer, tcInformer) svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) - secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister()) + secControl := controller.NewFakeSecretControl(kubeCli) certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl) genericControl := controller.NewFakeGenericControl() tidbUpgrader := NewFakeTiDBUpgrader() From b939e87fb59f398e399755f6f51e88df6883f424 Mon Sep 17 00:00:00 2001 From: Aylei Date: Fri, 10 Jan 2020 21:01:29 +0800 Subject: [PATCH 2/3] address review comments Signed-off-by: Aylei --- cmd/controller-manager/main.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go index 3bd0f5b556..ba97c4846f 100644 --- a/cmd/controller-manager/main.go +++ b/cmd/controller-manager/main.go @@ -138,11 +138,7 @@ func main() { var informerFactory informers.SharedInformerFactory var kubeInformerFactory kubeinformers.SharedInformerFactory labelSelector := fmt.Sprintf("%s=%s", label.ManagedByLabelKey, label.TiDBOperator) - options := []informers.SharedInformerOption{ - informers.WithTweakListOptions(func(listOpts *metav1.ListOptions) { - listOpts.LabelSelector = labelSelector - }), - } + var options []informers.SharedInformerOption kubeoptions := []kubeinformers.SharedInformerOption{ kubeinformers.WithTweakListOptions(func(listOpts *metav1.ListOptions) { listOpts.LabelSelector = labelSelector From c69f5eb43d10fce0c2d6abdfeada2e8ec8c4a69e Mon Sep 17 00:00:00 2001 From: Aylei Date: Wed, 8 Apr 2020 11:41:53 +0800 Subject: [PATCH 3/3] remove label selector of informer Signed-off-by: Aylei --- cmd/controller-manager/main.go | 9 +-------- pkg/backup/backup/backup_cleaner.go | 3 +-- pkg/backup/backup/backup_manager.go | 8 ++++---- pkg/backup/restore/restore_manager.go | 8 ++++---- pkg/backup/util/util.go | 6 +++--- pkg/controller/secret_control.go | 1 - pkg/controller/tidbcluster/tidb_cluster_controller.go | 2 +- pkg/manager/member/pump_member_manager_test.go | 3 +-- 8 files changed, 15 insertions(+), 25 deletions(-) diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go index 094b2d30ef..aca09196a9 100644 --- a/cmd/controller-manager/main.go +++ b/cmd/controller-manager/main.go @@ -16,7 +16,6 @@ package main import ( "context" "flag" - "fmt" "net/http" _ "net/http/pprof" "os" @@ -36,7 +35,6 @@ import ( "github.com/pingcap/tidb-operator/pkg/controller/tidbinitializer" "github.com/pingcap/tidb-operator/pkg/controller/tidbmonitor" "github.com/pingcap/tidb-operator/pkg/features" - "github.com/pingcap/tidb-operator/pkg/label" "github.com/pingcap/tidb-operator/pkg/scheme" "github.com/pingcap/tidb-operator/pkg/upgrader" "github.com/pingcap/tidb-operator/pkg/version" @@ -150,13 +148,8 @@ func main() { var informerFactory informers.SharedInformerFactory var kubeInformerFactory kubeinformers.SharedInformerFactory - labelSelector := fmt.Sprintf("%s=%s", label.ManagedByLabelKey, label.TiDBOperator) var options []informers.SharedInformerOption - kubeoptions := []kubeinformers.SharedInformerOption{ - kubeinformers.WithTweakListOptions(func(listOpts *metav1.ListOptions) { - listOpts.LabelSelector = labelSelector - }), - } + var kubeoptions []kubeinformers.SharedInformerOption if !controller.ClusterScoped { options = append(options, informers.WithNamespace(ns)) kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns)) diff --git 
a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go index 50816aa2eb..81df6426e6 100644 --- a/pkg/backup/backup/backup_cleaner.go +++ b/pkg/backup/backup/backup_cleaner.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" batchlisters "k8s.io/client-go/listers/batch/v1" - corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog" ) @@ -113,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st ns := backup.GetNamespace() name := backup.GetName() - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.kubeCli) if err != nil { return nil, reason, err } diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go index 2d7c46d295..f692d17225 100644 --- a/pkg/backup/backup/backup_manager.go +++ b/pkg/backup/backup/backup_manager.go @@ -169,12 +169,12 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s ns := backup.GetNamespace() name := backup.GetName() - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err) } @@ -269,12 +269,12 @@ func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, s return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", backupNamespace, backup.Spec.BR.Cluster), err } - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err) } diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go index 390429a388..5f0d971186 100644 --- a/pkg/backup/restore/restore_manager.go +++ b/pkg/backup/restore/restore_manager.go @@ -160,12 +160,12 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job ns := restore.GetNamespace() name := restore.GetName() - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, 
restore.Spec.StorageProvider, rm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err) } @@ -254,12 +254,12 @@ func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Jo return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", restoreNamespace, restore.Spec.BR.Cluster), err } - envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.secretLister) + envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli) if err != nil { return nil, reason, err } - storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.secretLister) + storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli) if err != nil { return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err) } diff --git a/pkg/backup/util/util.go b/pkg/backup/util/util.go index ab35c14bec..05a8ec1fbe 100644 --- a/pkg/backup/util/util.go +++ b/pkg/backup/util/util.go @@ -161,7 +161,7 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s } // GenerateStorageCertEnv generate the env info in order to access backend backup storage -func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) { +func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) { var certEnv []corev1.EnvVar var reason string var err error @@ -222,10 +222,10 @@ func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StoragePro } // GenerateTidbPasswordEnv generate the password EnvVar -func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) { +func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) { var certEnv []corev1.EnvVar var passwordKey string - secret, err := secretLister.Secrets(ns).Get(tidbSecretName) + secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{}) if err != nil { err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err) return certEnv, "GetTidbSecretFailed", err diff --git a/pkg/controller/secret_control.go b/pkg/controller/secret_control.go index 4f79522c96..4b443574bb 100644 --- a/pkg/controller/secret_control.go +++ b/pkg/controller/secret_control.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog" ) diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go index a08da8427b..dba0a9e7bb 100644 --- a/pkg/controller/tidbcluster/tidb_cluster_controller.go +++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go @@ -95,7 +95,7 @@ func NewController( tcControl := controller.NewRealTidbClusterControl(cli, tcInformer.Lister(), recorder) pdControl := pdapi.NewDefaultPDControl(kubeCli) tidbControl := 
controller.NewDefaultTiDBControl(kubeCli) - cmControl := controller.NewRealConfigMapControl(kubeCli, cmInformer.Lister(), recorder) + cmControl := controller.NewRealConfigMapControl(kubeCli, recorder) setControl := controller.NewRealStatefuSetControl(kubeCli, setInformer.Lister(), recorder) svcControl := controller.NewRealServiceControl(kubeCli, svcInformer.Lister(), recorder) pvControl := controller.NewRealPVControl(kubeCli, pvcInformer.Lister(), pvInformer.Lister(), recorder) diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go index 3ee032cff2..69b73ee6e4 100644 --- a/pkg/manager/member/pump_member_manager_test.go +++ b/pkg/manager/member/pump_member_manager_test.go @@ -441,13 +441,12 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak setInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Apps().V1().StatefulSets() tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters() svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services() - secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets() csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests() epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints() cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps() podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods() setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer) - secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister()) + secControl := controller.NewFakeSecretControl(kubeCli) certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl) svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer) cmControl := controller.NewFakeConfigMapControl(cmInformer)
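
Editor's note on the UpdateConfigMap path touched above: with the ConfigMap
lister removed, the conflict-retry loop now refetches the live object from the
API server instead of the cache. Below is a minimal sketch of that pattern,
assuming the pre-context client-go signatures used throughout this series; the
helper is illustrative, not repository code.

package controller

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateConfigMap retries an update, refreshing its copy of the object on
// each failed attempt (hypothetical trimmed-down version of the pattern in
// realConfigMapControl.UpdateConfigMap after this series).
func updateConfigMap(kubeCli kubernetes.Interface, cm *corev1.ConfigMap) error {
	desiredData := cm.Data
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		_, updateErr := kubeCli.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
		if updateErr == nil {
			return nil
		}
		// On failure, fetch the latest revision straight from the API server
		// (previously this came from the now-removed lister), reapply the
		// desired data, and let RetryOnConflict drive the next attempt.
		if updated, getErr := kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cm.Name, metav1.GetOptions{}); getErr == nil {
			cm = updated.DeepCopy()
			cm.Data = desiredData
		}
		return updateErr
	})
}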
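
Taken together, the series moves Secret and ConfigMap reads off the informer
cache onto direct API-server GETs, and the final patch then drops the informer
label selector again, since no cache-backed reads are left to filter. A sketch
of the resulting Secret read path under the same pre-context client-go
signature assumptions; the function name and the "password" data key are
illustrative, not taken from the repository:

package util

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getTiDBPassword reads a credential Secret directly from the API server,
// the way GenerateTidbPasswordEnv does after this series.
func getTiDBPassword(kubeCli kubernetes.Interface, ns, secretName string) ([]byte, error) {
	// Before: secretLister.Secrets(ns).Get(secretName), served from the
	// shared informer cache. After: a direct GET, so the Secret need not be
	// cached (or labeled) for the operator to see it.
	secret, err := kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("get tidb secret %s/%s failed, err: %v", ns, secretName, err)
	}
	pwd, ok := secret.Data["password"] // key name assumed for illustration
	if !ok {
		return nil, fmt.Errorf("secret %s/%s missing password key", ns, secretName)
	}
	return pwd, nil
}

The trade-off: each reconcile now pays one API round-trip where it previously
hit a local cache, in exchange for not holding every in-scope Secret in memory
and not requiring users to label their credential Secrets just so the
operator's filtered informers can see them.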