Commit

Merge branch 'master' into resume_paused_backup

ti-chi-bot[bot] authored Dec 18, 2023
2 parents 3c638e3 + b061322 commit 68a4d51
Showing 11 changed files with 136 additions and 11 deletions.
@@ -73,6 +73,9 @@ spec:
{{- if .Values.brFederationManager.leaderRetryPeriod }}
- -leader-retry-period={{ .Values.brFederationManager.leaderRetryPeriod }}
{{- end }}
{{- if .Values.brFederationManager.leaderResourceLock }}
- -leader-resource-lock={{ .Values.brFederationManager.leaderResourceLock }}
{{- end }}
{{- if .Values.brFederationManager.kubeClientQPS }}
- -kube-client-qps={{ .Values.brFederationManager.kubeClientQPS }}
{{- end }}
6 changes: 6 additions & 0 deletions charts/br-federation/templates/controller-manager-rbac.yaml
@@ -30,6 +30,9 @@ rules:
- apiGroups: [""]
resources: ["endpoints", "events"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: ["federation.pingcap.com"]
resources: ["*"]
verbs: ["*"]
@@ -73,6 +76,9 @@ rules:
- apiGroups: [""]
resources: ["endpoints", "events"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: ["federation.pingcap.com"]
resources: ["*"]
verbs: ["*"]
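For context on why the new coordination.k8s.io rules are needed: a lease-based election lock is implemented as get/create/update calls against a coordination.k8s.io/v1 Lease object, so the ClusterRole has to grant exactly those verbs. The sketch below is a hypothetical, self-contained reader of such a Lease (the namespace and lease name are illustrative, not the chart's actual values):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the local kubeconfig (assumption: run outside the cluster).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	kubeCli := kubernetes.NewForConfigOrDie(cfg)

	// The lease lock gets/creates/updates this object, which is what the
	// RBAC rules above permit; here we only read the current holder.
	lease, err := kubeCli.CoordinationV1().Leases("br-fed-system").Get(
		context.TODO(), "br-federation-manager", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if lease.Spec.HolderIdentity != nil {
		fmt.Println("current leader:", *lease.Spec.HolderIdentity)
	}
}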
3 changes: 3 additions & 0 deletions charts/br-federation/values.yaml
@@ -47,6 +47,9 @@ brFederationManager:
# leaderRenewDeadline: 10s
## leaderRetryPeriod is the duration the LeaderElector clients should wait between tries of actions
# leaderRetryPeriod: 2s
## leaderResourceLock is the type of resource object that will be used for locking during leader election
## If you were using "endpoints" before and want to migrate to "leases", migrate to "endpointsleases" first so that old and new replicas still recognize each other's lock during a rolling upgrade
# leaderResourceLock: "leases"

## number of workers that are allowed to sync concurrently. default 5
# workers: 5
@@ -106,6 +106,9 @@ spec:
{{- if .Values.controllerManager.leaderRetryPeriod }}
- -leader-retry-period={{ .Values.controllerManager.leaderRetryPeriod }}
{{- end }}
{{- if .Values.controllerManager.leaderResourceLock }}
- -leader-resource-lock={{ .Values.controllerManager.leaderResourceLock }}
{{- end }}
{{- if .Values.controllerManager.kubeClientQPS }}
- -kube-client-qps={{ .Values.controllerManager.kubeClientQPS }}
{{- end }}
6 changes: 6 additions & 0 deletions charts/tidb-operator/templates/controller-manager-rbac.yaml
@@ -38,6 +38,9 @@ rules:
- apiGroups: [""]
resources: ["endpoints","configmaps"]
verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["create","get","update","delete"]
@@ -195,6 +198,9 @@ rules:
- apiGroups: [""]
resources: ["endpoints","configmaps"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["create","get","update","delete"]
5 changes: 4 additions & 1 deletion charts/tidb-operator/values.yaml
@@ -84,6 +84,9 @@ controllerManager:
# leaderRenewDeadline: 10s
## leaderRetryPeriod is the duration the LeaderElector clients should wait between tries of actions
# leaderRetryPeriod: 2s
## leaderResourceLock is the type of resource object that will be used for locking during leader election
## If you were using "endpoints" before and want to migrate to "leases", migrate to "endpointsleases" first so that old and new replicas still recognize each other's lock during a rolling upgrade
# leaderResourceLock: "leases"

## number of workers that are allowed to sync concurrently. default 5
# workers: 5
@@ -149,7 +152,7 @@ controllerManager:
# kubeClientBurst: 10

scheduler:
create: true
create: false
# With rbac.create=false, the user is responsible for creating this account
# With rbac.create=true, this service account will be created
# Also see rbac.create and clusterScoped
52 changes: 49 additions & 3 deletions cmd/br-federation-manager/main.go
@@ -176,8 +176,10 @@ func main() {
}
// leader election for multiple br-federation-manager instances
go wait.Forever(func() {
leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
Lock: &resourcelock.EndpointsLock{
var lock resourcelock.Interface
switch cliCfg.ResourceLock {
case resourcelock.EndpointsResourceLock:
lock = &resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
@@ -187,7 +189,51 @@ func main() {
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
}
case resourcelock.LeasesResourceLock:
lock = &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
}
case resourcelock.EndpointsLeasesResourceLock:
lock = &resourcelock.MultiLock{
Primary: &resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
Secondary: &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
}
default:
// the configmaps-based locks are not supported
klog.Fatalf("leader-resource-lock only supports endpoints, leases or endpointsleases, but got %s", cliCfg.ResourceLock)
}

leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
Lock: lock,
LeaseDuration: cliCfg.LeaseDuration,
RenewDeadline: cliCfg.RenewDeadline,
RetryPeriod: cliCfg.RetryPeriod,
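As an aside, client-go ships a resourcelock.New helper that performs roughly the same string-to-lock mapping as the switch above; whether it still accepts the endpoints-based types depends on the client-go version being vendored (newer releases drop them). A minimal sketch under that assumption, reusing the ns, endPointsName, hostName and kubeCli variables from the diff:

// Sketch only: build the lock from the flag value via client-go's helper.
lock, err := resourcelock.New(
	cliCfg.ResourceLock,      // "endpoints", "leases" or "endpointsleases"
	ns,                       // namespace of the lock object
	endPointsName,            // name of the lock object
	kubeCli.CoreV1(),         // used by the endpoints lock
	kubeCli.CoordinationV1(), // used by the lease lock
	resourcelock.ResourceLockConfig{
		Identity:      hostName,
		EventRecorder: &record.FakeRecorder{},
	},
)
if err != nil {
	klog.Fatalf("failed to create %q resource lock: %v", cliCfg.ResourceLock, err)
}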
52 changes: 49 additions & 3 deletions cmd/controller-manager/main.go
@@ -221,8 +221,10 @@ func main() {
}
// leader election for multiple tidb-controller-manager instances
go wait.Forever(func() {
leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
Lock: &resourcelock.EndpointsLock{
var lock resourcelock.Interface
switch cliCfg.ResourceLock {
case resourcelock.EndpointsResourceLock:
lock = &resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
@@ -232,7 +234,51 @@ func main() {
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
}
case resourcelock.LeasesResourceLock:
lock = &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
}
case resourcelock.EndpointsLeasesResourceLock:
lock = &resourcelock.MultiLock{
Primary: &resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
Secondary: &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: endPointsName,
},
Client: kubeCli.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: hostName,
EventRecorder: &record.FakeRecorder{},
},
},
}
default:
// the configmaps-based locks are not supported
klog.Fatalf("leader-resource-lock only supports endpoints, leases or endpointsleases, but got %s", cliCfg.ResourceLock)
}

leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
Lock: lock,
LeaseDuration: cliCfg.LeaseDuration,
RenewDeadline: cliCfg.RenewDeadline,
RetryPeriod: cliCfg.RetryPeriod,
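The diff truncates the remainder of the LeaderElectionConfig; for readers unfamiliar with client-go leader election, the config is typically completed with callbacks along the following lines. This is a generic illustration, not the repository's actual callback code:

// Generic sketch: the callback bodies are placeholders.
leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
	Lock:          lock,
	LeaseDuration: cliCfg.LeaseDuration,
	RenewDeadline: cliCfg.RenewDeadline,
	RetryPeriod:   cliCfg.RetryPeriod,
	Callbacks: leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			// only start syncing after this replica wins the election
			klog.Info("became leader, starting controllers")
		},
		OnStoppedLeading: func() {
			// exit so another replica can take over the lock
			klog.Fatal("leader election lost")
		},
	},
})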
5 changes: 5 additions & 0 deletions pkg/controller/br_fed_config.go
@@ -16,6 +16,8 @@ package controller
import (
"flag"
"time"

"k8s.io/client-go/tools/leaderelection/resourcelock"
)

const (
@@ -36,6 +38,7 @@ type BrFedCLIConfig struct {
LeaseDuration time.Duration
RenewDeadline time.Duration
RetryPeriod time.Duration
ResourceLock string
WaitDuration time.Duration
// ResyncDuration is the resync time of informer
ResyncDuration time.Duration
@@ -56,6 +59,7 @@ func DefaultBrFedCLIConfig() *BrFedCLIConfig {
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
ResourceLock: resourcelock.EndpointsResourceLock,
WaitDuration: 5 * time.Second,
ResyncDuration: 30 * time.Second,

@@ -75,6 +79,7 @@ func (c *BrFedCLIConfig) AddFlag(_ *flag.FlagSet) {
flag.DurationVar(&c.LeaseDuration, "leader-lease-duration", c.LeaseDuration, "leader-lease-duration is the duration that non-leader candidates will wait to force acquire leadership")
flag.DurationVar(&c.RenewDeadline, "leader-renew-deadline", c.RenewDeadline, "leader-renew-deadline is the duration that the acting master will retry refreshing leadership before giving up")
flag.DurationVar(&c.RetryPeriod, "leader-retry-period", c.RetryPeriod, "leader-retry-period is the duration the LeaderElector clients should wait between tries of actions")
flag.StringVar(&c.ResourceLock, "leader-resource-lock", c.ResourceLock, "The type of resource object that is used for locking during leader election")
flag.Float64Var(&c.KubeClientQPS, "kube-client-qps", c.KubeClientQPS, "The maximum QPS to the kubernetes API server from client")
flag.IntVar(&c.KubeClientBurst, "kube-client-burst", c.KubeClientBurst, "The maximum burst for throttle to the kubernetes API server from client")

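Because -leader-resource-lock is a free-form string flag, an invalid value is only caught once main.go tries to build the lock. A hypothetical helper (not part of this change) that could validate it right after flag parsing, mirroring the switch in cmd/br-federation-manager/main.go:

// Sketch: assumes "fmt" and the resourcelock package are imported.
func validateResourceLock(lock string) error {
	switch lock {
	case resourcelock.EndpointsResourceLock,
		resourcelock.LeasesResourceLock,
		resourcelock.EndpointsLeasesResourceLock:
		return nil
	default:
		return fmt.Errorf("unsupported leader-resource-lock %q, expected endpoints, leases or endpointsleases", lock)
	}
}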
4 changes: 4 additions & 0 deletions pkg/controller/dependences.go
@@ -33,6 +33,7 @@ import (
extensionslister "k8s.io/client-go/listers/extensions/v1beta1"
networklister "k8s.io/client-go/listers/networking/v1"
storagelister "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -76,6 +77,7 @@ type CLIConfig struct {
LeaseDuration time.Duration
RenewDeadline time.Duration
RetryPeriod time.Duration
ResourceLock string
WaitDuration time.Duration
// ResyncDuration is the resync time of informer
ResyncDuration time.Duration
@@ -112,6 +114,7 @@ func DefaultCLIConfig() *CLIConfig {
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
ResourceLock: resourcelock.EndpointsResourceLock,
WaitDuration: 5 * time.Second,
ResyncDuration: 30 * time.Second,
PodHardRecoveryPeriod: 24 * time.Hour,
@@ -151,6 +154,7 @@ func (c *CLIConfig) AddFlag(_ *flag.FlagSet) {
flag.DurationVar(&c.LeaseDuration, "leader-lease-duration", c.LeaseDuration, "leader-lease-duration is the duration that non-leader candidates will wait to force acquire leadership")
flag.DurationVar(&c.RenewDeadline, "leader-renew-deadline", c.RenewDeadline, "leader-renew-deadline is the duration that the acting master will retry refreshing leadership before giving up")
flag.DurationVar(&c.RetryPeriod, "leader-retry-period", c.RetryPeriod, "leader-retry-period is the duration the LeaderElector clients should wait between tries of actions")
flag.StringVar(&c.ResourceLock, "leader-resource-lock", c.ResourceLock, "The type of resource object that is used for locking during leader election")
flag.Float64Var(&c.KubeClientQPS, "kube-client-qps", c.KubeClientQPS, "The maximum QPS to the kubernetes API server from client")
flag.IntVar(&c.KubeClientBurst, "kube-client-burst", c.KubeClientBurst, "The maximum burst for throttle to the kubernetes API server from client")
}
8 changes: 4 additions & 4 deletions tests/pkg/fixture/fixture.go
@@ -112,8 +112,8 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster {
Helper: &v1alpha1.HelperSpec{
Image: pointer.StringPtr(utilimage.HelperImage),
},
SchedulerName: "tidb-scheduler",
Timezone: "Asia/Shanghai",
// SchedulerName: "tidb-scheduler", // use the default k8s scheduler now
Timezone: "Asia/Shanghai",
Labels: map[string]string{
ClusterCustomKey: "value",
},
@@ -204,8 +204,8 @@ func GetDMCluster(ns, name, version string) *v1alpha1.DMCluster {
Version: version,
ImagePullPolicy: corev1.PullIfNotPresent,
PVReclaimPolicy: &deletePVP,
SchedulerName: "tidb-scheduler",
Timezone: "Asia/Shanghai",
// SchedulerName: "tidb-scheduler",
Timezone: "Asia/Shanghai",
Labels: map[string]string{
ClusterCustomKey: "value",
},
