
Commit

cluster: add tidb dashboard
nexustar committed Sep 21, 2022
1 parent 0be11e9 commit 7818dd1
Showing 7 changed files with 345 additions and 4 deletions.
18 changes: 18 additions & 0 deletions embed/examples/cluster/topology.example.yaml
@@ -294,6 +294,24 @@ kvcdc_servers:
data_dir: "/data1/tidb-data/tikv-cdc-8600"
log_dir: "/data1/tidb-deploy/tikv-cdc-8600/log"

# # Server configs are used to specify the configuration of TiDB Dashboard Servers.
tidb-dashboard_servers:
# # The ip address of the TiDB Dashboard Server.
- host: 10.0.1.11
# # SSH port of the server.
# ssh_port: 22
# # Port of TiDB Dashboard.
# port: 23333
# # TiDB Dashboard deployment file, startup script, configuration file storage directory.
# deploy_dir: "/tidb-deploy/tidb-dashboard-23333"
# # TiDB Dashboard data storage directory.
# data_dir: "/tidb-data/tidb-dashboard-23333"
# # TiDB Dashboard log file storage directory.
# log_dir: "/tidb-deploy/tidb-dashboard-23333/log"
# # numa node bindings.
# numa_node: "0,1"


# # Server configs are used to specify the configuration of Prometheus Server.
monitoring_servers:
# # The ip address of the Monitoring Server.
36 changes: 36 additions & 0 deletions embed/templates/scripts/run_tidb-dashboard.sh.tpl
@@ -0,0 +1,36 @@
#!/bin/bash
set -e

# WARNING: This file was auto-generated. Do not edit!
# All your edits might be overwritten!
DEPLOY_DIR={{.DeployDir}}

cd "${DEPLOY_DIR}" || exit 1

{{- define "PDList"}}
{{- range $idx, $pd := .}}
{{- if eq $idx 0}}
{{- $pd.Scheme}}://{{$pd.IP}}:{{$pd.ClientPort}}
{{- else -}}
,{{- $pd.Scheme}}://{{$pd.IP}}:{{$pd.ClientPort}}
{{- end}}
{{- end}}
{{- end}}

{{- if .NumaNode}}
exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tidb-dashboard \
{{- else}}
exec bin/tidb-dashboard \
{{- end}}
--feature-version="{{.TidbVersion}}" \
--host="{{.IP}}" \
--port="{{.Port}}" \
--pd="{{template "PDList" .Endpoints}}" \
--data-dir="{{.DataDir}}" \
{{- if .TLSEnabled}}
--tidb-ca tls/ca.crt \
--tidb-cert tls/tidb-dashboard.crt \
--tidb-key tls/tidb-dashboard.pem \
{{- end}}
1>> "{{.LogDir}}/tidb-dashboard.log" \
2>> "{{.LogDir}}/tidb-dashboard_stderr.log"
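For illustration, here is a rough sketch of how this template could be rendered from Go. The field names mirror the scripts.DashboardScript usage in dashboard.go below; the concrete values (version, host, directories) and the output path are placeholder assumptions for the example.

package main

import (
	"log"

	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
)

func main() {
	// Populate the template data the same way DashboardInstance.InitConfig does;
	// all concrete values here are placeholders for illustration only.
	cfg := &scripts.DashboardScript{
		TidbVersion: "v6.3.0",
		IP:          "10.0.1.11",
		Port:        23333,
		DeployDir:   "/tidb-deploy/tidb-dashboard-23333",
		DataDir:     "/tidb-data/tidb-dashboard-23333",
		LogDir:      "/tidb-deploy/tidb-dashboard-23333/log",
		// Endpoints would normally come from topo.Endpoints(deployUser).
	}

	// Render run_tidb-dashboard.sh.tpl into the startup script.
	if err := cfg.ConfigToFile("/tmp/run_tidb-dashboard.sh"); err != nil {
		log.Fatal(err)
	}
}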
209 changes: 209 additions & 0 deletions pkg/cluster/spec/dashboard.go
@@ -0,0 +1,209 @@
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
"context"
"crypto/tls"
"fmt"
"path/filepath"
"time"

"github.com/pingcap/tiup/pkg/cluster/ctxt"
"github.com/pingcap/tiup/pkg/cluster/template/scripts"
"github.com/pingcap/tiup/pkg/meta"
)

// DashboardSpec represents the Dashboard topology specification in topology.yaml
type DashboardSpec struct {
Host string `yaml:"host"`
SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
Version string `yaml:"version,omitempty"`
Patched bool `yaml:"patched,omitempty"`
IgnoreExporter bool `yaml:"ignore_exporter,omitempty"`
Port int `yaml:"port" default:"23333"`
DeployDir string `yaml:"deploy_dir,omitempty"`
DataDir string `yaml:"data_dir,omitempty"`
LogDir string `yaml:"log_dir,omitempty"`
NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
Config map[string]interface{} `yaml:"config,omitempty" validate:"config:ignore"`
ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"`
Arch string `yaml:"arch,omitempty"`
OS string `yaml:"os,omitempty"`
}

// Status queries current status of the instance
func (s *DashboardSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {
if timeout < time.Second {
timeout = statusQueryTimeout
}

state := statusByHost(s.Host, s.Port, "/status", timeout, tlsCfg)

return state
}

// Role returns the component role of the instance
func (s *DashboardSpec) Role() string {
return ComponentDashboard
}

// SSH returns the host and SSH port of the instance
func (s *DashboardSpec) SSH() (string, int) {
return s.Host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *DashboardSpec) GetMainPort() int {
return s.Port
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *DashboardSpec) IsImported() bool {
// TiDB-Ansible does not support TiDB Dashboard
return false
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *DashboardSpec) IgnoreMonitorAgent() bool {
return s.IgnoreExporter
}

// DashboardComponent represents the TiDB Dashboard component.
type DashboardComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *DashboardComponent) Name() string {
return ComponentDashboard
}

// Role implements Component interface.
func (c *DashboardComponent) Role() string {
return ComponentDashboard
}

// Instances implements Component interface.
func (c *DashboardComponent) Instances() []Instance {
ins := make([]Instance, 0, len(c.Topology.DashboardServers))
for _, s := range c.Topology.DashboardServers {
s := s
ins = append(ins, &DashboardInstance{BaseInstance{
InstanceSpec: s,
Name: c.Name(),
Host: s.Host,
Port: s.Port,
SSHP: s.SSHPort,

Ports: []int{
s.Port,
},
Dirs: []string{
s.DeployDir,
s.DataDir,
},
StatusFn: s.Status,
UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
return UptimeByHost(s.Host, s.Port, timeout, tlsCfg)
},
}, c.Topology})
}
return ins
}

// DashboardInstance represents the TiDB Dashboard instance.
type DashboardInstance struct {
BaseInstance
topo Topology
}

// ScaleConfig deploys a temporary config on scaling
func (i *DashboardInstance) ScaleConfig(
ctx context.Context,
e ctxt.Executor,
topo Topology,
clusterName,
clusterVersion,
user string,
paths meta.DirPaths,
) error {
s := i.topo
defer func() {
i.topo = s
}()
i.topo = mustBeClusterTopo(topo)

return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths)
}

// InitConfig implements Instance interface.
func (i *DashboardInstance) InitConfig(
ctx context.Context,
e ctxt.Executor,
clusterName,
clusterVersion,
deployUser string,
paths meta.DirPaths,
) error {
topo := i.topo.(*Specification)
if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil {
return err
}
enableTLS := topo.GlobalOptions.TLSEnabled
spec := i.InstanceSpec.(*DashboardSpec)

cfg := &scripts.DashboardScript{
TidbVersion: clusterVersion,
IP: i.GetHost(),
DeployDir: paths.Deploy,
DataDir: paths.Data[0],
LogDir: paths.Log,
Port: spec.Port,
NumaNode: spec.NumaNode,
Endpoints: topo.Endpoints(deployUser),
}

fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb-dashboard_%s_%d.sh", i.GetHost(), i.GetPort()))

if err := cfg.ConfigToFile(fp); err != nil {
return err
}
dst := filepath.Join(paths.Deploy, "scripts", "run_tidb-dashboard.sh")
if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
return err
}

_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
if err != nil {
return err
}

globalConfig := topo.ServerConfigs.Dashboard

// set TLS configs
spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths)
if err != nil {
return err
}

if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil {
return err
}

return checkConfig(ctx, e, i.ComponentName(), clusterVersion, i.OS(), i.Arch(), i.ComponentName()+".toml", paths, nil)
}

// setTLSConfig sets the TLS configuration to support enabling/disabling TLS
func (i *DashboardInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]interface{}, paths meta.DirPaths) (map[string]interface{}, error) {
return nil, nil
}
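As a quick sanity check of how the new spec types fit together, here is a minimal, hypothetical sketch that builds a Specification with a single dashboard server and lists its instances. It assumes only the types added in this diff plus the GetHost/GetPort accessors that instances already expose (InitConfig above relies on them).

package main

import (
	"fmt"

	"github.com/pingcap/tiup/pkg/cluster/spec"
)

func main() {
	// A topology with one TiDB Dashboard node; other components omitted for brevity.
	topo := &spec.Specification{
		DashboardServers: []*spec.DashboardSpec{
			{Host: "10.0.1.11", Port: 23333},
		},
	}

	// DashboardComponent enumerates one Instance per entry in DashboardServers.
	comp := &spec.DashboardComponent{Topology: topo}
	for _, ins := range comp.Instances() {
		fmt.Printf("%s instance at %s:%d\n", comp.Name(), ins.GetHost(), ins.GetPort())
	}
}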
1 change: 1 addition & 0 deletions pkg/cluster/spec/instance.go
@@ -42,6 +42,7 @@ const (
ComponentTiFlash = "tiflash"
ComponentGrafana = "grafana"
ComponentDrainer = "drainer"
ComponentDashboard = "tidb-dashboard"
ComponentPump = "pump"
ComponentCDC = "cdc"
ComponentTiKVCDC = "tikv-cdc"
13 changes: 9 additions & 4 deletions pkg/cluster/spec/spec.go
@@ -107,6 +107,7 @@ type (
TiDB map[string]interface{} `yaml:"tidb"`
TiKV map[string]interface{} `yaml:"tikv"`
PD map[string]interface{} `yaml:"pd"`
Dashboard map[string]interface{} `yaml:"tidb-dashboard"`
TiFlash map[string]interface{} `yaml:"tiflash"`
TiFlashLearner map[string]interface{} `yaml:"tiflash-learner"`
Pump map[string]interface{} `yaml:"pump"`
@@ -125,6 +126,7 @@ type (
TiKVServers []*TiKVSpec `yaml:"tikv_servers"`
TiFlashServers []*TiFlashSpec `yaml:"tiflash_servers"`
PDServers []*PDSpec `yaml:"pd_servers"`
DashboardServers []*DashboardSpec `yaml:"tidb-dashboard_servers"`
PumpServers []*PumpSpec `yaml:"pump_servers,omitempty"`
Drainers []*DrainerSpec `yaml:"drainer_servers,omitempty"`
CDCServers []*CDCSpec `yaml:"cdc_servers,omitempty"`
@@ -483,6 +485,7 @@ func (s *Specification) Merge(that Topology) Topology {
TiDBServers: append(s.TiDBServers, spec.TiDBServers...),
TiKVServers: append(s.TiKVServers, spec.TiKVServers...),
PDServers: append(s.PDServers, spec.PDServers...),
DashboardServers: append(s.DashboardServers, spec.DashboardServers...),
TiFlashServers: append(s.TiFlashServers, spec.TiFlashServers...),
PumpServers: append(s.PumpServers, spec.PumpServers...),
Drainers: append(s.Drainers, spec.Drainers...),
@@ -577,7 +580,7 @@ func setCustomDefaults(globalOptions *GlobalOptions, field reflect.Value) error
clientPort := reflect.Indirect(field).FieldByName("ClientPort").Int()
field.Field(j).Set(reflect.ValueOf(fmt.Sprintf("pd-%s-%d", host, clientPort)))
case "DataDir":
if reflect.Indirect(field).FieldByName("Imported").Interface().(bool) {
if imported := reflect.Indirect(field).FieldByName("Imported"); imported.IsValid() && imported.Interface().(bool) {
setDefaultDir(globalOptions.DataDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
}

@@ -610,7 +613,7 @@ func setCustomDefaults(globalOptions *GlobalOptions, field reflect.Value) error
case "DeployDir":
setDefaultDir(globalOptions.DeployDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
case "LogDir":
if reflect.Indirect(field).FieldByName("Imported").Interface().(bool) {
if imported := reflect.Indirect(field).FieldByName("Imported"); imported.IsValid() && imported.Interface().(bool) {
setDefaultDir(globalOptions.LogDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
}

@@ -691,8 +694,9 @@ func (s *Specification) ComponentsByStopOrder() (comps []Component) {

// ComponentsByStartOrder return component in the order need to start.
func (s *Specification) ComponentsByStartOrder() (comps []Component) {
// "pd", "tikv", "pump", "tidb", "tiflash", "drainer", "cdc", "tikv-cdc", "prometheus", "grafana", "alertmanager"
// "pd", "dashboard", "tikv", "pump", "tidb", "tiflash", "drainer", "cdc", "tikv-cdc", "prometheus", "grafana", "alertmanager"
comps = append(comps, &PDComponent{s})
comps = append(comps, &DashboardComponent{s})
comps = append(comps, &TiKVComponent{s})
comps = append(comps, &PumpComponent{s})
comps = append(comps, &TiDBComponent{s})
@@ -710,9 +714,10 @@ func (s *Specification) ComponentsByStartOrder() (comps []Component) {

// ComponentsByUpdateOrder return component in the order need to be updated.
func (s *Specification) ComponentsByUpdateOrder() (comps []Component) {
// "tiflash", "pd", "tikv", "pump", "tidb", "drainer", "cdc", "prometheus", "grafana", "alertmanager"
// "tiflash", "pd", "dashboard", "tikv", "pump", "tidb", "drainer", "cdc", "prometheus", "grafana", "alertmanager"
comps = append(comps, &TiFlashComponent{s})
comps = append(comps, &PDComponent{s})
comps = append(comps, &DashboardComponent{s})
comps = append(comps, &TiKVComponent{s})
comps = append(comps, &PumpComponent{s})
comps = append(comps, &TiDBComponent{s})
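To see where the dashboard now sits in the component lifecycle, a tiny hypothetical sketch like the one below could print the start order; it assumes nothing beyond ComponentsByStartOrder and Component.Name as shown in this diff.

package main

import (
	"fmt"

	"github.com/pingcap/tiup/pkg/cluster/spec"
)

func main() {
	// An otherwise-empty topology is enough to inspect component ordering;
	// the tidb-dashboard component should be listed right after pd.
	topo := &spec.Specification{}
	for _, comp := range topo.ComponentsByStartOrder() {
		fmt.Println(comp.Name())
	}
}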
9 changes: 9 additions & 0 deletions pkg/cluster/task/update_meta.go
@@ -72,6 +72,15 @@ func (u *UpdateMeta) Execute(ctx context.Context) error {
}
newMeta.Topology.PDServers = pdServers

dashboardServers := make([]*spec.DashboardSpec, 0)
for i, instance := range (&spec.DashboardComponent{Topology: topo}).Instances() {
if deleted.Exist(instance.ID()) {
continue
}
dashboardServers = append(dashboardServers, topo.DashboardServers[i])
}
newMeta.Topology.DashboardServers = dashboardServers

tiflashServers := make([]*spec.TiFlashSpec, 0)
for i, instance := range (&spec.TiFlashComponent{Topology: topo}).Instances() {
if deleted.Exist(instance.ID()) {