Support hostNetwork (#774)
* Support hostNetwork

* Use POD_NAME instead of HOSTNAME

* Update chart

* e2e test

* remove PodSpec
cofyc authored Aug 21, 2019
1 parent 89d41bd commit e17d281
Showing 10 changed files with 84 additions and 7 deletions.
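In short: with hostNetwork enabled, the pod shares the node's network namespace and $HOSTNAME typically resolves to the node's hostname rather than the pod name, so the PD and TiKV startup scripts switch to a POD_NAME variable injected via the downward API, and the generated StatefulSets set dnsPolicy: ClusterFirstWithHostNet so cluster DNS keeps resolving the headless-service domains. A minimal pod-spec sketch of the pattern this commit applies (illustrative only; the actual manifests are generated by the member managers below, and the container name is a placeholder):

    spec:
      hostNetwork: true                    # share the node's network namespace
      dnsPolicy: ClusterFirstWithHostNet   # keep in-cluster DNS working under hostNetwork
      containers:
      - name: pd                           # placeholder container name
        env:
        - name: POD_NAME                   # pod name via the downward API; $HOSTNAME may be the node name here
          valueFrom:
            fieldRef:
              fieldPath: metadata.name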
4 changes: 2 additions & 2 deletions charts/tidb-cluster/templates/scripts/_start_pd.sh.tpl
@@ -30,7 +30,7 @@ fi

# the general form of variable PEER_SERVICE_NAME is: "<clusterName>-pd-peer"
cluster_name=`echo ${PEER_SERVICE_NAME} | sed 's/-pd-peer//'`
domain="${HOSTNAME}.${PEER_SERVICE_NAME}.${NAMESPACE}.svc"
domain="${POD_NAME}.${PEER_SERVICE_NAME}.${NAMESPACE}.svc"
discovery_url="${cluster_name}-discovery.${NAMESPACE}.svc:10261"
encoded_domain_url=`echo ${domain}:2380 | base64 | tr "\n" " " | sed "s/ //g"`

@@ -57,7 +57,7 @@ while true; do
done

ARGS="--data-dir=/var/lib/pd \
--name=${HOSTNAME} \
--name=${POD_NAME} \
--peer-urls=http://0.0.0.0:2380 \
--advertise-peer-urls=http://${domain}:2380 \
--client-urls=http://0.0.0.0:2379 \
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl
@@ -29,7 +29,7 @@ then
fi

ARGS="--pd=${CLUSTER_NAME}-pd:2379 \
--advertise-addr=${HOSTNAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc:20160 \
--advertise-addr=${POD_NAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc:20160 \
--addr=0.0.0.0:20160 \
--data-dir=/var/lib/tikv \
--capacity=${CAPACITY} \
3 changes: 3 additions & 0 deletions charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -45,6 +45,7 @@ spec:
annotations:
{{ toYaml .Values.pd.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.pd.hostNetwork }}
tikv:
replicas: {{ .Values.tikv.replicas }}
image: {{ .Values.tikv.image }}
@@ -67,6 +68,7 @@ spec:
annotations:
{{ toYaml .Values.tikv.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.tikv.hostNetwork }}
tidb:
replicas: {{ .Values.tidb.replicas }}
image: {{ .Values.tidb.image }}
@@ -86,6 +88,7 @@ spec:
annotations:
{{ toYaml .Values.tidb.annotations | indent 6 }}
{{- end }}
hostNetwork: {{ .Values.tidb.hostNetwork }}
binlogEnabled: {{ .Values.binlog.pump.create | default false }}
maxFailoverCount: {{ .Values.tidb.maxFailoverCount | default 3 }}
separateSlowLog: {{ .Values.tidb.separateSlowLog | default false }}
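Rendered with the default values, each component section of the generated TidbCluster manifest now carries the flag, roughly (illustrative fragment; unrelated fields omitted):

    spec:
      pd:
        hostNetwork: false
      tikv:
        hostNetwork: false
      tidb:
        hostNetwork: false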
13 changes: 13 additions & 0 deletions charts/tidb-cluster/values.yaml
@@ -152,6 +152,10 @@ pd:
# effect: "NoSchedule"
annotations: {}

# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false

tikv:
# Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default
# tikv configurations (change to the tags of your tikv version),
@@ -248,6 +252,10 @@ tikv:
# effect: "NoSchedule"
annotations: {}

# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false

tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
# tidb configurations(change to the tags of your tidb version),
@@ -297,6 +305,11 @@ tidb:
# value: tidb
# effect: "NoSchedule"
annotations: {}

# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false

maxFailoverCount: 3
service:
type: NodePort
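With these defaults in place, host networking can be enabled per component from a user's values file, for example (a sketch using only the new keys added above):

    pd:
      hostNetwork: true
    tikv:
      hostNetwork: true
    tidb:
      hostNetwork: true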
11 changes: 7 additions & 4 deletions pkg/apis/pingcap.com/v1alpha1/types.go
@@ -109,9 +109,10 @@ type PDSpec struct {
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
}

// TiDBSpec contains details of TiDB members
@@ -120,9 +121,10 @@ type TiDBSpec struct {
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
BinlogEnabled bool `json:"binlogEnabled,omitempty"`
MaxFailoverCount int32 `json:"maxFailoverCount,omitempty"`
SeparateSlowLog bool `json:"separateSlowLog,omitempty"`
@@ -137,13 +139,14 @@ type TiDBSlowLogTailerSpec struct {
// TiKVSpec contains details of TiKV members
type TiKVSpec struct {
ContainerSpec
Privileged bool `json:"privileged,omitempty"`
Replicas int32 `json:"replicas"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
HostNetwork bool `json:"hostNetwork,omitempty"`
Privileged bool `json:"privileged,omitempty"`
StorageClassName string `json:"storageClassName,omitempty"`
}

// TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
15 changes: 15 additions & 0 deletions pkg/manager/member/pd_member_manager.go
@@ -484,6 +484,11 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster)
}
}

dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults
if tc.Spec.PD.HostNetwork {
dnsPolicy = corev1.DNSClusterFirstWithHostNet
}

pdSet := &apps.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: setName,
@@ -503,6 +508,8 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster)
SchedulerName: tc.Spec.SchedulerName,
Affinity: tc.Spec.PD.Affinity,
NodeSelector: tc.Spec.PD.NodeSelector,
HostNetwork: tc.Spec.PD.HostNetwork,
DNSPolicy: dnsPolicy,
Containers: []corev1.Container{
{
Name: v1alpha1.PDMemberType.String(),
@@ -532,6 +539,14 @@ func (pmm *pdMemberManager) getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster)
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "PEER_SERVICE_NAME",
Value: controller.PDPeerMemberName(tcName),
7 changes: 7 additions & 0 deletions pkg/manager/member/tidb_member_manager.go
@@ -329,6 +329,11 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbClust
},
})

dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults
if tc.Spec.TiDB.HostNetwork {
dnsPolicy = corev1.DNSClusterFirstWithHostNet
}

tidbLabel := label.New().Instance(instanceName).TiDB()
podAnnotations := CombineAnnotations(controller.AnnProm(10080), tc.Spec.TiDB.Annotations)
tidbSet := &apps.StatefulSet{
@@ -350,6 +355,8 @@ func (tmm *tidbMemberManager) getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbClust
SchedulerName: tc.Spec.SchedulerName,
Affinity: tc.Spec.TiDB.Affinity,
NodeSelector: tc.Spec.TiDB.NodeSelector,
HostNetwork: tc.Spec.TiDB.HostNetwork,
DNSPolicy: dnsPolicy,
Containers: containers,
RestartPolicy: corev1.RestartPolicyAlways,
Tolerations: tc.Spec.TiDB.Tolerations,
15 changes: 15 additions & 0 deletions pkg/manager/member/tikv_member_manager.go
@@ -320,6 +320,11 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster)
storageClassName = controller.DefaultStorageClassName
}

dnsPolicy := corev1.DNSClusterFirst // same as k8s defaults
if tc.Spec.TiKV.HostNetwork {
dnsPolicy = corev1.DNSClusterFirstWithHostNet
}

tikvset := &apps.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: setName,
@@ -339,6 +344,8 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster)
SchedulerName: tc.Spec.SchedulerName,
Affinity: tc.Spec.TiKV.Affinity,
NodeSelector: tc.Spec.TiKV.NodeSelector,
HostNetwork: tc.Spec.TiKV.HostNetwork,
DNSPolicy: dnsPolicy,
Containers: []corev1.Container{
{
Name: v1alpha1.TiKVMemberType.String(),
@@ -366,6 +373,14 @@ func (tkmm *tikvMemberManager) getNewSetForTidbCluster(tc *v1alpha1.TidbCluster)
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
{
Name: "CLUSTER_NAME",
Value: tcName,
11 changes: 11 additions & 0 deletions tests/cluster_info.go
@@ -33,6 +33,17 @@ func (tc *TidbClusterConfig) ScaleTiDB(replicas uint) *TidbClusterConfig {
return tc
}

func (tc *TidbClusterConfig) RunInHost(flag bool) *TidbClusterConfig {
val := "false"
if flag {
val = "true"
}
tc.set("pd.hostNetwork", val)
tc.set("tikv.hostNetwork", val)
tc.set("tidb.hostNetwork", val)
return tc
}

func (tc *TidbClusterConfig) UpgradePD(image string) *TidbClusterConfig {
tc.PDImage = image
return tc
10 changes: 10 additions & 0 deletions tests/cmd/e2e/main.go
@@ -111,6 +111,16 @@ func main() {
UpdateTiDBTokenLimit(cfg.TiDBTokenLimit)
oa.UpgradeTidbClusterOrDie(cluster1)
oa.CheckTidbClusterStatusOrDie(cluster1)

// switch to host network
cluster1.RunInHost(true)
oa.UpgradeTidbClusterOrDie(cluster1)
oa.CheckTidbClusterStatusOrDie(cluster1)

// switch to pod network
cluster1.RunInHost(false)
oa.UpgradeTidbClusterOrDie(cluster1)
oa.CheckTidbClusterStatusOrDie(cluster1)
}
fn2 := func(wg *sync.WaitGroup) {
defer wg.Done()
