…ator into pd-replicas-1
weekface committed May 24, 2019
2 parents 83364c9 + 219b1c0 commit 4830fe1
Showing 44 changed files with 2,450 additions and 548 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -9,6 +9,7 @@ tests/images/fault-trigger/bin/
tests/images/e2e/tidb-cluster/
tests/images/e2e/tidb-backup/
tests/images/e2e/tidb-operator/
tests/images/e2e/manifests/
*.tar
tmp/
data/
7 changes: 6 additions & 1 deletion Makefile
@@ -28,7 +28,7 @@ docker-push: docker
docker: build
docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-operator:latest" images/tidb-operator

build: controller-manager scheduler discovery
build: controller-manager scheduler discovery admission-controller

controller-manager:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-controller-manager cmd/controller-manager/main.go
@@ -39,6 +39,9 @@ scheduler:
discovery:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-discovery cmd/discovery/main.go

admission-controller:
$(GO) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-admission-controller cmd/admission-controller/main.go

e2e-setup:
# ginkgo doesn't work with retool for Go 1.11
@GO111MODULE=on CGO_ENABLED=0 go get github.com/onsi/ginkgo@v1.6.0
@@ -50,9 +53,11 @@ e2e-docker: e2e-build
[ -d tests/images/e2e/tidb-operator ] && rm -r tests/images/e2e/tidb-operator || true
[ -d tests/images/e2e/tidb-cluster ] && rm -r tests/images/e2e/tidb-cluster || true
[ -d tests/images/e2e/tidb-backup ] && rm -r tests/images/e2e/tidb-backup || true
[ -d tests/images/e2e/manifests ] && rm -r tests/images/e2e/manifests || true
cp -r charts/tidb-operator tests/images/e2e
cp -r charts/tidb-cluster tests/images/e2e
cp -r charts/tidb-backup tests/images/e2e
cp -r manifests tests/images/e2e
docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:latest" tests/images/e2e

e2e-build: e2e-setup
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/config/_pd-config.tpl
@@ -82,7 +82,7 @@ max-replicas = {{ .Values.pd.maxReplicas }}
# The placement priorities are implied by the order of label keys.
# For example, ["zone", "rack"] means that we should place replicas to
# different zones first, then to different racks if we don't have enough zones.
location-labels = ["zone", "rack", "host"]
location-labels = ["region", "zone", "rack", "host"]

[label-property]
# Do not assign region leaders to stores that have these tags.
15 changes: 6 additions & 9 deletions charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -33,11 +33,10 @@ spec:
{{- if .Values.pd.resources }}
{{ toYaml .Values.pd.resources | indent 4 }}
{{- end }}
{{- if .Values.pd.nodeSelector }}
affinity:
{{ toYaml .Values.pd.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.pd.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.pd.tolerations }}
tolerations:
{{ toYaml .Values.pd.tolerations | indent 4 }}
@@ -56,11 +55,10 @@ spec:
{{- if .Values.tikv.resources }}
{{ toYaml .Values.tikv.resources | indent 4 }}
{{- end }}
{{- if .Values.tikv.nodeSelector }}
affinity:
{{ toYaml .Values.tikv.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.tikv.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.tikv.tolerations }}
tolerations:
{{ toYaml .Values.tikv.tolerations | indent 4 }}
@@ -76,11 +74,10 @@ spec:
{{- if .Values.tidb.resources }}
{{ toYaml .Values.tidb.resources | indent 4 }}
{{- end }}
{{- if .Values.tidb.nodeSelector }}
affinity:
{{ toYaml .Values.tidb.affinity | indent 6 }}
nodeSelector:
{{ toYaml .Values.tidb.nodeSelector | indent 6 }}
{{- end }}
nodeSelectorRequired: {{ .Values.nodeSelectorRequired | default true }}
{{- if .Values.tidb.tolerations }}
tolerations:
{{ toYaml .Values.tidb.tolerations | indent 4 }}
103 changes: 88 additions & 15 deletions charts/tidb-cluster/values.yaml
@@ -73,16 +73,72 @@ pd:
# cpu: 4000m
# memory: 4Gi
storage: 1Gi
# nodeSelector is used for scheduling pod,
# if nodeSelectorRequired is true, all the following labels must be matched

## affinity defines pd scheduling rules; it is empty by default.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## The following is a typical example of affinity settings:
## The podAntiAffinity setting in the example keeps PD pods from co-locating on the same topology node as far as possible, to improve the disaster tolerance of PD on Kubernetes.
## The nodeAffinity setting in the example ensures that PD pods can only be scheduled to nodes with the label kind="pd".
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# # this term works when the nodes have the label named region
# - weight: 10
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "region"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named zone
# - weight: 20
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "zone"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named rack
# - weight: 40
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "rack"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have the label named kubernetes.io/hostname
# - weight: 80
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "kubernetes.io/hostname"
# namespaces:
# - <helm namespace>
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "kind"
# operator: In
# values:
# - "pd"

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: pd
# # zone is comma separated availability zone list
# zone: cn-bj1-01,cn-bj1-02
# # region is comma separated region list
# region: cn-bj1
# Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
@@ -117,10 +173,18 @@ tikv:
# cpu: 12000m
# memory: 24Gi
storage: 10Gi

## affinity defines tikv scheduling rules; it is empty by default.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: tikv
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
@@ -196,10 +260,19 @@ tidb:
requests: {}
# cpu: 12000m
# memory: 12Gi


## affinity defines tidb scheduling rules; it is empty by default.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# kind: tidb
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1

## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
96 changes: 96 additions & 0 deletions cmd/admission-controller/main.go
@@ -0,0 +1,96 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"flag"
	"os"
	"os/signal"
	"syscall"

	"github.com/golang/glog"
	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
	"github.com/pingcap/tidb-operator/pkg/webhook"
	"github.com/pingcap/tidb-operator/version"
	"k8s.io/apiserver/pkg/util/logs"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

var (
	printVersion bool
	certFile     string
	keyFile      string
)

func init() {
	flag.BoolVar(&printVersion, "V", false, "Show version and quit")
	flag.BoolVar(&printVersion, "version", false, "Show version and quit")
	flag.StringVar(&certFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "File containing the x509 Certificate for HTTPS.")
	flag.StringVar(&keyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "File containing the x509 private key to --tlsCertFile.")
	flag.Parse()
}

func main() {
	logs.InitLogs()
	defer logs.FlushLogs()

	if printVersion {
		version.PrintVersionInfo()
		os.Exit(0)
	}
	version.LogVersionInfo()

	cfg, err := rest.InClusterConfig()
	if err != nil {
		glog.Fatalf("failed to get config: %v", err)
	}

	cli, err := versioned.NewForConfig(cfg)
	if err != nil {
		glog.Fatalf("failed to create Clientset: %v", err)
	}

	kubeCli, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		glog.Fatalf("failed to get kubernetes Clientset: %v", err)
	}

	webhookServer := webhook.NewWebHookServer(kubeCli, cli, certFile, keyFile)

	sigs := make(chan os.Signal, 1)
	done := make(chan bool, 1)

	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		<-sigs

		// Gracefully shut down the server
		if err := webhookServer.Shutdown(); err != nil {
			glog.Errorf("failed to shutdown server: %v", err)
		}

		done <- true
	}()

	if err := webhookServer.Run(); err != nil {
		glog.Errorf("stopped http server: %v", err)
	}

	<-done

	glog.Infof("webhook server terminated safely")
}
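
The Run/Shutdown pair used above follows the usual net/http graceful-shutdown pattern: Run blocks serving TLS traffic, and Shutdown, called from the signal handler, drains in-flight requests before Run returns. The sketch below shows one minimal shape such a server could take; it is only an illustration of the pattern, not the actual pkg/webhook implementation, and the package name, type name, listen address, and URL path are assumptions.

package webhooksketch

import (
	"context"
	"net/http"
)

// Server is a hypothetical stand-in for the webhook server created in main.go.
type Server struct {
	httpServer *http.Server
	certFile   string
	keyFile    string
}

// NewServer wires an admission handler into a TLS server.
// The ":443" address and "/statefulsets" path are illustrative only.
func NewServer(handler http.Handler, certFile, keyFile string) *Server {
	mux := http.NewServeMux()
	mux.Handle("/statefulsets", handler)
	return &Server{
		httpServer: &http.Server{Addr: ":443", Handler: mux},
		certFile:   certFile,
		keyFile:    keyFile,
	}
}

// Run blocks serving HTTPS until the server fails or is shut down.
func (s *Server) Run() error {
	return s.httpServer.ListenAndServeTLS(s.certFile, s.keyFile)
}

// Shutdown stops accepting new connections and drains in-flight requests.
func (s *Server) Shutdown() error {
	return s.httpServer.Shutdown(context.Background())
}

With net/http, Run would return http.ErrServerClosed once Shutdown completes, which is consistent with main logging the returned error and then waiting on the done channel before exiting.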
4 changes: 4 additions & 0 deletions deploy/gcp/.gitignore
@@ -0,0 +1,4 @@
.terraform
*.tfstate*
credentials
rendered