diff --git a/dialout/dialout_client/dialout_client.go b/dialout/dialout_client/dialout_client.go index ed4cfd009..ec9f1bef8 100644 --- a/dialout/dialout_client/dialout_client.go +++ b/dialout/dialout_client/dialout_client.go @@ -21,6 +21,7 @@ import ( "strings" "sync" "time" + "os" ) const ( @@ -94,7 +95,6 @@ func (d Destination) Validate() error { // Global config for all clients type ClientConfig struct { - SrcIp string RetryInterval time.Duration Encoding gpb.Encoding Unidirectional bool // by default, no reponse from remote server @@ -127,6 +127,7 @@ type clientSubscription struct { sendMsg uint64 recvMsg uint64 errors uint64 + restart bool //whether need restart connect to server flag } // Client handles execution of the telemetry publish service. @@ -263,9 +264,14 @@ func newClient(ctx context.Context, dest Destination) (*Client, error) { opts := []grpc.DialOption{ grpc.WithBlock(), } + if clientCfg.TLS != nil { opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(clientCfg.TLS))) + } else { + opts = append(opts, grpc.WithInsecure()) + log.V(2).Infof("gRPC without TLS") } + conn, err := grpc.DialContext(ctx, dest.Addrs, opts...) 
if err != nil { return nil, fmt.Errorf("Dial to (%s, timeout %v): %v", dest, timeout, err) @@ -297,6 +303,7 @@ restart: //Remote server might go down, in that case we restart with next destin cs.q = queue.NewPriorityQueue(1, false) cs.opened = true cs.client = nil + cs.restart = false cs.cMu.Unlock() cs.conTryCnt++ @@ -337,48 +344,54 @@ restart: //Remote server might go down, in that case we restart with next destin switch cs.reportType { case Periodic: + ticker := time.NewTicker(cs.interval) for { select { - default: - spbValues, err := cs.dc.Get(nil) - if err != nil { - // TODO: need to inform - log.V(2).Infof("Data read error %v for %v", err, cs) - continue - //return nil, status.Error(codes.NotFound, err.Error()) - } - var updates []*gpb.Update - var spbValue *spb.Value - for _, spbValue = range spbValues { - update := &gpb.Update{ - Path: spbValue.GetPath(), - Val: spbValue.GetVal(), + case <-ticker.C: + go func() { + spbValues, err := cs.dc.Get(nil) + if err != nil { + // TODO: need to inform + log.V(2).Infof("Data read error %v for %v", err, cs) + //continue + //return nil, status.Error(codes.NotFound, err.Error()) + return } - updates = append(updates, update) - } - rs := &gpb.SubscribeResponse_Update{ - Update: &gpb.Notification{ - Timestamp: spbValue.GetTimestamp(), - Prefix: cs.prefix, - Update: updates, - }, - } - response := &gpb.SubscribeResponse{Response: rs} - - log.V(6).Infof("cs %s sending \n\t%v \n To %s", cs.name, response, dest) - err = pub.Send(response) - if err != nil { - log.V(1).Infof("Client %v pub Send error:%v, cs.conTryCnt %v", cs.name, err, cs.conTryCnt) - cs.Close() - // Retry - goto restart - } - log.V(6).Infof("cs %s to %s done", cs.name, dest) - cs.sendMsg++ - c.sendMsg++ + var updates []*gpb.Update + var spbValue *spb.Value + for _, spbValue = range spbValues { + update := &gpb.Update{ + Path: spbValue.GetPath(), + Val: spbValue.GetVal(), + } + updates = append(updates, update) + } + rs := &gpb.SubscribeResponse_Update{ + Update: 
&gpb.Notification{ + Timestamp: spbValue.GetTimestamp(), + Prefix: cs.prefix, + Update: updates, + }, + } + response := &gpb.SubscribeResponse{Response: rs} - time.Sleep(cs.interval) + log.V(6).Infof("cs %s sending \n\t%v \n To %s", cs.name, response, dest) + err = pub.Send(response) + if err != nil { + log.V(1).Infof("Client %v pub Send error:%v, cs.conTryCnt %v", cs.name, err, cs.conTryCnt) + cs.restart = true + cs.Close() + // Retry + //goto restart + } + log.V(6).Infof("cs %s to %s done", cs.name, dest) + cs.sendMsg++ + c.sendMsg++ + }() case <-cs.stop: + if cs.restart == true { + goto restart + } log.V(1).Infof("%v exiting publishRun routine for destination %s", cs, dest) return } @@ -470,6 +483,7 @@ func processTelemetryClientConfig(ctx context.Context, redisDb *redis.Client, ke } log.V(2).Infof("Processing %v %v", tableKey, fv) + configMu.Lock() defer configMu.Unlock() @@ -482,8 +496,6 @@ func processTelemetryClientConfig(ctx context.Context, redisDb *redis.Client, ke } else { for field, value := range fv { switch field { - case "src_ip": - clientCfg.SrcIp = value case "retry_interval": //TODO: check validity of the interval itvl, err := strconv.ParseUint(value, 10, 64) @@ -595,17 +607,25 @@ func processTelemetryClientConfig(ctx context.Context, redisDb *redis.Client, ke cs.prefix = &gpb.Path{ Target: value, } + deviceName, err := os.Hostname() + if err != nil { + log.V(2).Infof("Get hostname error: %v", err) + } else { + cs.prefix.Origin = deviceName + } case "paths": - paths := strings.Split(value, ",") - for _, path := range paths { - pp, err := ygot.StringToPath(path, ygot.StructuredPath) + ps := strings.Split(value, ",") + newPaths := []*gpb.Path{} + for _, p := range ps { + pp, err := ygot.StringToPath(p, ygot.StructuredPath) if err != nil { log.V(2).Infof("Invalid paths %v", value) return fmt.Errorf("Invalid paths %v", value) } // append *gpb.Path - cs.paths = append(cs.paths, pp) + newPaths = append(newPaths, pp) } + cs.paths = newPaths default: 
log.V(2).Infof("Invalid field %v value %v", field, value) return fmt.Errorf("Invalid field %v value %v", field, value) diff --git a/dialout/dialout_client/dialout_client_test.go b/dialout/dialout_client/dialout_client_test.go index 23290f83a..2fb6f67d1 100644 --- a/dialout/dialout_client/dialout_client_test.go +++ b/dialout/dialout_client/dialout_client_test.go @@ -144,7 +144,7 @@ func prepareDb(t *testing.T) { mpi_name_map := loadConfig(t, "COUNTERS_PORT_NAME_MAP", countersPortNameMapByte) loadDB(t, rclient, mpi_name_map) - fileName = "../../testdata/COUNTERS:Ethernet68.txt" + fileName = "../../testdata/COUNTERS-Ethernet68.txt" countersEthernet68Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -153,7 +153,7 @@ func prepareDb(t *testing.T) { mpi_counter := loadConfig(t, "COUNTERS:oid:0x1000000000039", countersEthernet68Byte) loadDB(t, rclient, mpi_counter) - fileName = "../../testdata/COUNTERS:Ethernet1.txt" + fileName = "../../testdata/COUNTERS-Ethernet1.txt" countersEthernet1Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -163,7 +163,7 @@ func prepareDb(t *testing.T) { loadDB(t, rclient, mpi_counter) // "Ethernet64:0": "oid:0x1500000000092a" : queue counter, to work as data noise - fileName = "../../testdata/COUNTERS:oid:0x1500000000092a.txt" + fileName = "../../testdata/COUNTERS-oid-0x1500000000092a.txt" counters92aByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -265,14 +265,14 @@ func TestGNMIDialOutPublish(t *testing.T) { } _ = countersPortNameMapByte - fileName = "../../testdata/COUNTERS:Ethernet68.txt" + fileName = "../../testdata/COUNTERS-Ethernet68.txt" countersEthernet68Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) } _ = countersEthernet68Byte - fileName = "../../testdata/COUNTERS:Ethernet_wildcard.txt" + fileName = 
"../../testdata/COUNTERS-Ethernet_wildcard.txt" countersEthernetWildcardByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -280,7 +280,7 @@ func TestGNMIDialOutPublish(t *testing.T) { _ = countersEthernetWildcardByte - fileName = "../../testdata/COUNTERS:Ethernet_wildcard_PFC_7_RX.txt" + fileName = "../../testdata/COUNTERS-Ethernet_wildcard_PFC_7_RX.txt" countersEthernetWildcardPfcByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -289,7 +289,6 @@ func TestGNMIDialOutPublish(t *testing.T) { _ = countersEthernetWildcardPfcByte clientCfg := ClientConfig{ - SrcIp: "", RetryInterval: 5 * time.Second, Encoding: pb.Encoding_JSON_IETF, Unidirectional: true, diff --git a/dialout/dialout_client_cli/dialout_client_cli.go b/dialout/dialout_client_cli/dialout_client_cli.go index d8d9014da..337f0290a 100644 --- a/dialout/dialout_client_cli/dialout_client_cli.go +++ b/dialout/dialout_client_cli/dialout_client_cli.go @@ -15,17 +15,20 @@ import ( var ( clientCfg = dc.ClientConfig{ - SrcIp: "", RetryInterval: 30 * time.Second, Encoding: gpb.Encoding_JSON_IETF, Unidirectional: true, - TLS: &tls.Config{}, } + + tlsCfg = tls.Config{} + + tlsDisable bool ) func init() { - flag.StringVar(&clientCfg.TLS.ServerName, "server_name", "", "When set, use this hostname to verify server certificate during TLS handshake.") - flag.BoolVar(&clientCfg.TLS.InsecureSkipVerify, "insecure", false, "When set, client will not verify the server certificate during TLS handshake.") + flag.StringVar(&tlsCfg.ServerName, "server_name", "", "When set, use this hostname to verify server certificate during TLS handshake.") + flag.BoolVar(&tlsCfg.InsecureSkipVerify, "skip_verify", false, "When set, client will not verify the server certificate during TLS handshake.") + flag.BoolVar(&tlsDisable, "insecure", false, "Without TLS, only for testing") flag.DurationVar(&clientCfg.RetryInterval, "retry_interval", 
30*time.Second, "Interval at which client tries to reconnect to destination servers") flag.BoolVar(&clientCfg.Unidirectional, "unidirectional", true, "No repesponse from server is expected") } @@ -41,6 +44,10 @@ func main() { cancel() }() log.V(1).Infof("Starting telemetry publish client") + if !tlsDisable { + clientCfg.TLS = &tlsCfg + log.V(1).Infof("TLS enable") + } err := dc.DialOutRun(ctx, &clientCfg) log.V(1).Infof("Exiting telemetry publish client: %v", err) log.Flush() diff --git a/dialout/dialout_server_cli/dialout_server_cli.go b/dialout/dialout_server_cli/dialout_server_cli.go index 02b9cee06..4ac60b96e 100644 --- a/dialout/dialout_server_cli/dialout_server_cli.go +++ b/dialout/dialout_server_cli/dialout_server_cli.go @@ -11,7 +11,6 @@ import ( "google.golang.org/grpc/credentials" ds "github.com/Azure/sonic-telemetry/dialout/dialout_server" - testcert "github.com/Azure/sonic-telemetry/testdata/tls" ) var ( @@ -20,7 +19,7 @@ var ( caCert = flag.String("ca_crt", "", "CA certificate for client certificate validation. 
Optional.") serverCert = flag.String("server_crt", "", "TLS server certificate") serverKey = flag.String("server_key", "", "TLS server private key") - insecure = flag.Bool("insecure", false, "Skip providing TLS cert and key, for testing only!") + insecure = flag.Bool("insecure", false, "Without TLS, for testing only!") allowNoClientCert = flag.Bool("allow_no_client_auth", false, "When set, telemetry server will request but not require a client certificate.") ) @@ -35,12 +34,8 @@ func main() { var certificate tls.Certificate var err error - if *insecure { - certificate, err = testcert.NewCert() - if err != nil { - log.Exitf("could not load server key pair: %s", err) - } - } else { + var opts []grpc.ServerOption + if !*insecure { switch { case *serverCert == "": log.Errorf("serverCert must be set.") @@ -53,32 +48,32 @@ func main() { if err != nil { log.Exitf("could not load server key pair: %s", err) } - } - - tlsCfg := &tls.Config{ - ClientAuth: tls.RequireAndVerifyClientCert, - Certificates: []tls.Certificate{certificate}, - } - if *allowNoClientCert { - // RequestClientCert will ask client for a certificate but won't - // require it to proceed. If certificate is provided, it will be - // verified. - tlsCfg.ClientAuth = tls.RequestClientCert - } - if *caCert != "" { - ca, err := ioutil.ReadFile(*caCert) - if err != nil { - log.Exitf("could not read CA certificate: %s", err) + tlsCfg := &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{certificate}, } - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(ca); !ok { - log.Exit("failed to append CA certificate") + if *allowNoClientCert { + // RequestClientCert will ask client for a certificate but won't + // require it to proceed. If certificate is provided, it will be + // verified. 
+ tlsCfg.ClientAuth = tls.RequestClientCert + } + + if *caCert != "" { + ca, err := ioutil.ReadFile(*caCert) + if err != nil { + log.Exitf("could not read CA certificate: %s", err) + } + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(ca); !ok { + log.Exit("failed to append CA certificate") + } + tlsCfg.ClientCAs = certPool } - tlsCfg.ClientCAs = certPool + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} } - opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} cfg := &ds.Config{} cfg.Port = int64(*port) s, err := ds.NewServer(cfg, opts) diff --git a/glide.lock b/glide.lock new file mode 100644 index 000000000..1ac79659c --- /dev/null +++ b/glide.lock @@ -0,0 +1,135 @@ +hash: 772c1deaf69068d627ca711e1514d0c12e43134c32a5a95cf975afbcb3dba6af +updated: 2018-05-16T02:27:45.106673837-07:00 +imports: +- name: github.com/c9s/goprocinfo + version: 0010a05ce49fde7f50669bc7ecda7d41dd6ab824 + subpackages: + - linux +- name: github.com/go-ole/go-ole + version: a1ec82a652ebc7e784d7df887cfb31061aeabbcc + subpackages: + - oleutil +- name: github.com/go-redis/redis + version: 877867d2845fbaf86798befe410b6ceb6f5c29a3 + subpackages: + - internal + - internal/consistenthash + - internal/hashtag + - internal/pool + - internal/proto + - internal/singleflight + - internal/util +- name: github.com/go-yaml/yaml + version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 +- name: github.com/golang/glog + version: 23def4e6c14b4da8ac2ed8007337bc5eb5007998 +- name: github.com/golang/protobuf + version: b4deda0973fb4c70b50d226b1af49f3da59f5265 + subpackages: + - proto + - protoc-gen-go/descriptor + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/google/gnxi + version: 4cc9f27e4d5fee0a952a7490906d2493f0bd6c79 + subpackages: + - utils +- name: github.com/kylelemons/godebug + version: d65d576e9348f5982d7f6d83682b694e731a45c6 + subpackages: + - diff + - pretty +- name: github.com/openconfig/gnmi + version: 
a0e319ea17b69501b2b287db91149ed93e5e3ebc + subpackages: + - client + - client/gnmi + - client/grpcutil + - ctree + - errlist + - proto/gnmi + - proto/gnmi_ext + - value +- name: github.com/openconfig/goyang + version: b6e1e28273ad274f9240361b3a1ae65d27c045ac + subpackages: + - pkg/indent + - pkg/yang +- name: github.com/openconfig/ygot + version: b71b4131ec632fdb387e35dfe635182ad2368c1f + subpackages: + - util + - ygot +- name: github.com/shirou/gopsutil + version: c95755e4bcd7a62bb8bd33f3a597a7c7f35e2cf3 + subpackages: + - disk + - internal/common + - mem +- name: github.com/StackExchange/wmi + version: cdffdb33acae0e14efff2628f9bae377b597840e +- name: github.com/workiva/go-datastructures + version: 01f52d4880ca171ab1033bb0ee03cda8e0a31cc3 + subpackages: + - queue +- name: golang.org/x/net + version: 61147c48b25b599e5b561d2e9c4f3e1ef489ca41 + subpackages: + - context + - http2 + - http2/hpack + - idna + - internal/timeseries + - lex/httplex + - trace +- name: golang.org/x/sys + version: 7c87d13f8e835d2fb3a70a2912c811ed0c1d241b + subpackages: + - unix + - windows +- name: golang.org/x/text + version: 2cb43934f0eece38629746959acc633cba083fe4 + subpackages: + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: google.golang.org/genproto + version: ce84044298496ef4b54b4a0a0909ba593cc60e30 + subpackages: + - googleapis/rpc/status +- name: google.golang.org/grpc + version: 41344da2231b913fa3d983840a57a6b1b7b631a1 + subpackages: + - balancer + - balancer/base + - balancer/roundrobin + - channelz + - codes + - connectivity + - credentials + - encoding + - encoding/proto + - grpclb/grpc_lb_v1/messages + - grpclog + - internal + - keepalive + - metadata + - naming + - peer + - reflection + - reflection/grpc_reflection_v1alpha + - resolver + - resolver/dns + - resolver/passthrough + - stats + - status + - tap + - transport +testImports: +- name: github.com/jipanyang/gnmi + version: cb4d464fa018c29eadab98281448000bee4dcc3d + subpackages: + - client/gnmi 
diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 000000000..45d96068d --- /dev/null +++ b/glide.yaml @@ -0,0 +1,49 @@ +package: github.com/Azure/sonic-telemetry +import: +- package: github.com/c9s/goprocinfo + subpackages: + - linux +- package: github.com/go-redis/redis + version: ^6.10.2 +- package: github.com/golang/glog +- package: github.com/golang/protobuf + version: ^1.0.0 + subpackages: + - proto +- package: github.com/google/gnxi + subpackages: + - utils +- package: github.com/openconfig/gnmi + subpackages: + - proto/gnmi +- package: github.com/openconfig/ygot + version: ^0.5.0 + subpackages: + - ygot +- package: github.com/workiva/go-datastructures + version: ^1.0.44 + subpackages: + - queue +- package: golang.org/x/net + subpackages: + - context + - trace +- package: google.golang.org/grpc + version: ^1.11.2 + subpackages: + - codes + - credentials + - peer + - reflection + - status +- package: github.com/shirou/gopsutil + version: ~2.18.4 +- package: github.com/go-yaml/yaml + version: ~2.2.1 +testImport: +- package: github.com/jipanyang/gnmi + subpackages: + - client/gnmi +- package: github.com/kylelemons/godebug + subpackages: + - pretty diff --git a/gnmi_server/server.go b/gnmi_server/server.go index a52353dea..2273b759a 100644 --- a/gnmi_server/server.go +++ b/gnmi_server/server.go @@ -1,11 +1,14 @@ package gnmi import ( + "encoding/json" "errors" "fmt" "net" + "strconv" "strings" "sync" + "time" log "github.com/golang/glog" "golang.org/x/net/context" @@ -202,9 +205,132 @@ func (s *Server) Get(ctx context.Context, req *gnmipb.GetRequest) (*gnmipb.GetRe return &gnmipb.GetResponse{Notification: notifications}, nil } -// Set method is not implemented. 
Refer to gnxi for examples with openconfig integration -func (srv *Server) Set(context.Context, *gnmipb.SetRequest) (*gnmipb.SetResponse, error) { - return nil, grpc.Errorf(codes.Unimplemented, "Set() is not implemented") +// Get string value from TypedValue in gnmipb +func getUpdateVal(t *gnmipb.TypedValue) (interface{}, error) { + switch t.Value.(type) { + case *gnmipb.TypedValue_IntVal: + return strconv.FormatInt(t.GetIntVal(), 10), nil + case *gnmipb.TypedValue_StringVal: + return t.GetStringVal(), nil + case *gnmipb.TypedValue_JsonIetfVal: + var f interface{} + err := json.Unmarshal(t.GetJsonIetfVal(), &f) + if err != nil { + return "", fmt.Errorf("typedValue: %v not json", t) + } + m := f.(map[string]interface{}) + fv := make(map[string]string) + fv1 := make(map[string]map[string]interface{}) + mapflag := false + for k, v := range m { + fv2 := make(map[string]interface{}) + switch v.(type) { + case string: + fv[k] = v.(string) + case int: + fv[k] = strconv.FormatInt(int64(v.(int)), 10) + case float64: + fv[k] = strconv.FormatFloat(v.(float64), 'E', -1, 64) + case bool: + fv[k] = strconv.FormatBool(v.(bool)) + case map[string]interface{}: + m1 := v.(map[string]interface{}) + for k1, v1 := range m1 { + fv2[k1] = v1 + } + fv1[k] = fv2 + mapflag = true + default: + return "", fmt.Errorf("typedValue: %v field %v type not supported", t, k) + } + } + if mapflag == true { + return fv1, nil + } else { + return fv, nil + } + default: + return "", fmt.Errorf("typedValue: %v type not supported", t) + } +} + +// Set implements the Get RPC in gNMI spec. 
+func (srv *Server) Set(ctx context.Context, req *gnmipb.SetRequest) (*gnmipb.SetResponse, error) { + var target string + prefix := req.GetPrefix() + if prefix == nil { + return nil, status.Error(codes.Unimplemented, "No target specified in prefix") + } + + target = prefix.GetTarget() + if target == "" { + return nil, status.Error(codes.Unimplemented, "Empty target data not supported yet") + } + + // only support set config_db + if target != "CONFIG_DB" { + return nil, status.Errorf(codes.Unimplemented, "unsupported request target") + } + + log.V(6).Infof("SetRequest: %v", req) + var results []*gnmipb.UpdateResult + dc, err := sdc.NewDbClient(nil, prefix) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + for _, path := range req.GetDelete() { + log.V(2).Infof("Delete path: %v", path) + err := dc.Set(path, "", true) + if err != nil { + return nil, err + } + res := gnmipb.UpdateResult{ + Path: path, + Op: gnmipb.UpdateResult_DELETE, + } + results = append(results, &res) + } + + for _, path := range req.GetReplace() { + val, err := getUpdateVal(path.GetVal()) + if err != nil { + return nil, err + } + log.V(2).Infof("Replace path: %v val: %v", path, val) + err = dc.Set(path.GetPath(), val, true) + if err != nil { + return nil, err + } + res := gnmipb.UpdateResult{ + Path: path.GetPath(), + Op: gnmipb.UpdateResult_REPLACE, + } + results = append(results, &res) + } + + for _, path := range req.GetUpdate() { + val, err := getUpdateVal(path.GetVal()) + if err != nil { + return nil, err + } + log.V(2).Infof("Update path: %v val: %v", path, val) + err = dc.Set(path.GetPath(), val, false) + if err != nil { + return nil, err + } + res := gnmipb.UpdateResult{ + Path: path.GetPath(), + Op: gnmipb.UpdateResult_UPDATE, + } + results = append(results, &res) + } + + return &gnmipb.SetResponse{ + Timestamp: time.Now().UnixNano(), + Prefix: req.GetPrefix(), + Response: results, + }, nil } // Capabilities method is not implemented. 
Refer to gnxi for examples with openconfig integration diff --git a/gnmi_server/server_test.go b/gnmi_server/server_test.go index 4d48c96a8..8d9b73b6b 100644 --- a/gnmi_server/server_test.go +++ b/gnmi_server/server_test.go @@ -10,6 +10,7 @@ import ( "github.com/go-redis/redis" "github.com/golang/protobuf/proto" + "flag" "github.com/kylelemons/godebug/pretty" "github.com/openconfig/gnmi/client" pb "github.com/openconfig/gnmi/proto/gnmi" @@ -198,7 +199,7 @@ func prepareDb(t *testing.T) { mpi_qname_map := loadConfig(t, "COUNTERS_QUEUE_NAME_MAP", countersQueueNameMapByte) loadDB(t, rclient, mpi_qname_map) - fileName = "../testdata/COUNTERS:Ethernet68.txt" + fileName = "../testdata/COUNTERS-Ethernet68.txt" countersEthernet68Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -207,7 +208,7 @@ func prepareDb(t *testing.T) { mpi_counter := loadConfig(t, "COUNTERS:oid:0x1000000000039", countersEthernet68Byte) loadDB(t, rclient, mpi_counter) - fileName = "../testdata/COUNTERS:Ethernet1.txt" + fileName = "../testdata/COUNTERS-Ethernet1.txt" countersEthernet1Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -217,7 +218,7 @@ func prepareDb(t *testing.T) { loadDB(t, rclient, mpi_counter) // "Ethernet64:0": "oid:0x1500000000092a" : queue counter, to work as data noise - fileName = "../testdata/COUNTERS:oid:0x1500000000092a.txt" + fileName = "../testdata/COUNTERS-oid-0x1500000000092a.txt" counters92aByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -226,7 +227,7 @@ func prepareDb(t *testing.T) { loadDB(t, rclient, mpi_counter) // "Ethernet68:1": "oid:0x1500000000091c" : queue counter, for COUNTERS/Ethernet68/Queue vpath test - fileName = "../testdata/COUNTERS:oid:0x1500000000091c.txt" + fileName = "../testdata/COUNTERS-oid-0x1500000000091c.txt" countersEeth68_1Byte, err := ioutil.ReadFile(fileName) if err != nil { 
t.Fatalf("read file %v err: %v", fileName, err) @@ -236,6 +237,9 @@ func prepareDb(t *testing.T) { } func TestGnmiGet(t *testing.T) { + flag.Set("alsologtostderr", "true") + //flag.Set("v", "6") + flag.Parse() //t.Log("Start server") s := createServer(t) go runServer(t, s) @@ -264,19 +268,19 @@ func TestGnmiGet(t *testing.T) { t.Fatalf("read file %v err: %v", fileName, err) } - fileName = "../testdata/COUNTERS:Ethernet68.txt" + fileName = "../testdata/COUNTERS-Ethernet68.txt" countersEthernet68Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) } - fileName = "../testdata/COUNTERS:Ethernet_wildcard.txt" + fileName = "../testdata/COUNTERS-Ethernet_wildcard.txt" countersEthernetWildcardByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) } - fileName = "../testdata/COUNTERS:Ethernet_wildcard_PFC_7_RX.txt" + fileName = "../testdata/COUNTERS-Ethernet_wildcard_PFC_7_RX.txt" countersEthernetWildcardPfcByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -391,7 +395,7 @@ func runTestSubscribe(t *testing.T) { countersPortNameMapJsonUpdate["test_field"] = "test_value" // for table key subscription - fileName = "../testdata/COUNTERS:Ethernet68.txt" + fileName = "../testdata/COUNTERS-Ethernet68.txt" countersEthernet68Byte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -410,7 +414,7 @@ func runTestSubscribe(t *testing.T) { // field SAI_PORT_STAT_PFC_7_RX_PKTS has new value of 4 countersEthernet68JsonPfcUpdate["SAI_PORT_STAT_PFC_7_RX_PKTS"] = "4" - fileName = "../testdata/COUNTERS:Ethernet_wildcard.txt" + fileName = "../testdata/COUNTERS-Ethernet_wildcard.txt" countersEthernetWildcardByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -425,7 +429,7 @@ func runTestSubscribe(t *testing.T) { 
json.Unmarshal(countersEthernetWildcardByte, &countersFieldUpdate) countersFieldUpdate["Ethernet68"] = countersEthernet68JsonPfcUpdate - fileName = "../testdata/COUNTERS:Ethernet_wildcard_PFC_7_RX.txt" + fileName = "../testdata/COUNTERS-Ethernet_wildcard_PFC_7_RX.txt" countersEthernetWildcardPfcByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -442,7 +446,7 @@ func runTestSubscribe(t *testing.T) { //allPortPfcJsonUpdate := countersEthernetWildcardPfcJson.(map[string]interface{}) allPortPfcJsonUpdate["Ethernet68"] = pfc7Map - fileName = "../testdata/COUNTERS:Ethernet_wildcard_Queues.txt" + fileName = "../testdata/COUNTERS-Ethernet_wildcard_Queues.txt" countersEthernetWildQueuesByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -450,7 +454,7 @@ func runTestSubscribe(t *testing.T) { var countersEthernetWildQueuesJson interface{} json.Unmarshal(countersEthernetWildQueuesByte, &countersEthernetWildQueuesJson) - fileName = "../testdata/COUNTERS:Ethernet68:Queues.txt" + fileName = "../testdata/COUNTERS-Ethernet68-Queues.txt" countersEthernet68QueuesByte, err := ioutil.ReadFile(fileName) if err != nil { t.Fatalf("read file %v err: %v", fileName, err) @@ -910,6 +914,442 @@ func TestGnmiSubscribe(t *testing.T) { s.s.Stop() } +type SimpleSRequest struct { + target string + deletePath []string + replacePath []string + replaceVal pb.TypedValue + updatePath []string + updateVal pb.TypedValue +} + +// Construct SetRequest +func newSetRequest(sr *SimpleSRequest) *pb.SetRequest { + pf := pb.Path{ + Target: sr.target, + } + + var dPath []*pb.Path + if len(sr.deletePath) >= 1 { + p := pb.Path{ + Elem: []*pb.PathElem{}, + } + for _, e := range sr.deletePath { + pe := pb.PathElem{Name: e} + p.Elem = append(p.Elem, &pe) + } + dPath = append(dPath, &p) + } + + var rUpdate []*pb.Update + if len(sr.replacePath) >= 1 { + p := pb.Path{ + Elem: []*pb.PathElem{}, + } + for _, e := 
range sr.replacePath { + pe := pb.PathElem{Name: e} + p.Elem = append(p.Elem, &pe) + } + u := pb.Update{ + Path: &p, + Val: &sr.replaceVal, + } + rUpdate = append(rUpdate, &u) + } + + var uUpdate []*pb.Update + if len(sr.updatePath) >= 1 { + p := pb.Path{ + Elem: []*pb.PathElem{}, + } + for _, e := range sr.updatePath { + pe := pb.PathElem{Name: e} + p.Elem = append(p.Elem, &pe) + } + u := pb.Update{ + Path: &p, + Val: &sr.updateVal, + } + uUpdate = append(uUpdate, &u) + } + + srq := &pb.SetRequest{ + Prefix: &pf, + Delete: dPath, + Replace: rUpdate, + Update: uUpdate, + } + return srq +} + +type SimpleSResponse struct { + target string + path []string + op int32 +} + +// Check setResponse +func compareSetResponse(sr *pb.SetResponse, simple *SimpleSResponse) bool { + pf := sr.GetPrefix() + if pf.GetTarget() != simple.target { + return false + } + r := sr.GetResponse() + if len(r) != 1 { + return false + } + for _, ur := range r { + if int32(ur.GetOp()) != simple.op { + return false + } + + p := ur.GetPath() + if len(p.GetElem()) != len(simple.path) { + return false + } + for i, e := range p.GetElem() { + if e.Name != simple.path[i] { + return false + } + } + } + return true +} + +type tblFV struct { + tbl string + f string + v interface{} +} + +func TestGnmiSet(t *testing.T) { + //flag.Set("alsologtostderr", "true") + //flag.Set("v", "6") + //flag.Parse() + s := createServer(t) + go runServer(t, s) + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + opts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))} + + targetAddr := "127.0.0.1:8080" + conn, err := grpc.Dial(targetAddr, opts...) 
+ if err != nil { + t.Fatalf("Dialing to %q failed: %v", targetAddr, err) + } + defer conn.Close() + + gClient := pb.NewGNMIClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Only support CONFIG_DB + dbn := spb.Target_value["CONFIG_DB"] + rclient := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "localhost:6379", + Password: "", // no password set + DB: int(dbn), + DialTimeout: 0, + }) + _, err = rclient.Ping().Result() + if err != nil { + t.Fatalf("failed to connect to redis server %v", err) + } + defer rclient.Close() + jVal := `{"retry_interval":"60", "encoding":"JSON_IETF", "unidirectional":true}` + jByte := []byte(jVal) + + jVal1 := `{"Vlan10":{"admin_status":"up","description":"vlan_trunk","mtu":"1200"},"Vlan20":{"admin_status":"down","mtu":"9180"}}` + jByte1 := []byte(jVal1) + + jVal2 := `{"Vlan20":{"mtu":"1200"}}` + jByte2 := []byte(jVal2) + + tests := []struct { + desc string + in SimpleSRequest + want SimpleSResponse + wantFV tblFV + wantErr string + }{ + { + desc: "Delete path", + in: SimpleSRequest{ + target: "CONFIG_DB", + deletePath: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + op: int32(pb.UpdateResult_DELETE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|Global", + f: "retry_interval", + v: "", + }, + }, + { + desc: "Update path value int", + in: SimpleSRequest{ + target: "CONFIG_DB", + updatePath: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + updateVal: pb.TypedValue{ + Value: &pb.TypedValue_IntVal{5}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + op: int32(pb.UpdateResult_UPDATE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|Global", + f: "retry_interval", + v: "5", + }, + }, + { + desc: "Update path value string", + in: SimpleSRequest{ + target: 
"CONFIG_DB", + updatePath: []string{"TELEMETRY_CLIENT", "DestinationGroup_TEST", "dst_addr"}, + updateVal: pb.TypedValue{ + Value: &pb.TypedValue_StringVal{"20.20.20.20:8081"}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "DestinationGroup_TEST", "dst_addr"}, + op: int32(pb.UpdateResult_UPDATE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|DestinationGroup_TEST", + f: "dst_addr", + v: "20.20.20.20:8081", + }, + }, + { + desc: "Update path value json", + in: SimpleSRequest{ + target: "CONFIG_DB", + updatePath: []string{"TELEMETRY_CLIENT", "Global"}, + updateVal: pb.TypedValue{ + Value: &pb.TypedValue_JsonIetfVal{jByte}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "Global"}, + op: int32(pb.UpdateResult_UPDATE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|Global", + f: "", + v: map[string]string{ + "retry_interval": "60", + "encoding": "JSON_IETF", + "unidirectional": "true", + }, + }, + }, + { + desc: "Replace path value json mapmap add", + in: SimpleSRequest{ + target: "CONFIG_DB", + replacePath: []string{"VLAN", ""}, + replaceVal: pb.TypedValue{ + Value: &pb.TypedValue_JsonIetfVal{jByte1}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"VLAN", ""}, + op: int32(pb.UpdateResult_REPLACE), + }, + wantFV: tblFV{ + tbl: "VLAN|Vlan10", + f: "", + v: map[string]string{"admin_status": "up", "description": "vlan_trunk", "mtu": "1200"}, + }, + }, + { + desc: "Replace path value json mapmap del", + in: SimpleSRequest{ + target: "CONFIG_DB", + replacePath: []string{"VLAN", ""}, + replaceVal: pb.TypedValue{ + Value: &pb.TypedValue_JsonIetfVal{jByte2}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"VLAN", ""}, + op: int32(pb.UpdateResult_REPLACE), + }, + wantFV: tblFV{ + tbl: "VLAN|Vlan20", + f: "", + v: map[string]string{"mtu": "1200"}, + }, + }, + { + desc: "Replace path value int", + in: SimpleSRequest{ + target: 
"CONFIG_DB", + replacePath: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + replaceVal: pb.TypedValue{ + Value: &pb.TypedValue_IntVal{5}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "Global", "retry_interval"}, + op: int32(pb.UpdateResult_REPLACE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|Global", + f: "retry_interval", + v: "5", + }, + }, + { + desc: "Replace path value string", + in: SimpleSRequest{ + target: "CONFIG_DB", + replacePath: []string{"TELEMETRY_CLIENT", "DestinationGroup_TEST", "dst_addr"}, + replaceVal: pb.TypedValue{ + Value: &pb.TypedValue_StringVal{"20.20.20.20:8081"}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "DestinationGroup_TEST", "dst_addr"}, + op: int32(pb.UpdateResult_REPLACE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|DestinationGroup_TEST", + f: "dst_addr", + v: "20.20.20.20:8081", + }, + }, + { + desc: "Replace path value json", + in: SimpleSRequest{ + target: "CONFIG_DB", + replacePath: []string{"TELEMETRY_CLIENT", "Global"}, + replaceVal: pb.TypedValue{ + Value: &pb.TypedValue_JsonIetfVal{jByte}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "Global"}, + op: int32(pb.UpdateResult_REPLACE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|Global", + f: "", + v: map[string]string{ + "retry_interval": "60", + "encoding": "JSON_IETF", + "unidirectional": "true", + }, + }, + }, + { + desc: "Update path value json mapmap change", + in: SimpleSRequest{ + target: "CONFIG_DB", + updatePath: []string{"VLAN", ""}, + updateVal: pb.TypedValue{ + Value: &pb.TypedValue_JsonIetfVal{jByte2}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"VLAN", ""}, + op: int32(pb.UpdateResult_UPDATE), + }, + wantFV: tblFV{ + tbl: "VLAN|Vlan20", + f: "", + v: map[string]string{"admin_status": "down", "mtu": "1200"}, + }, + }, + { + desc: "Update path not supported", + 
in: SimpleSRequest{ + target: "CONFIG_DB", + updatePath: []string{"PORT", "Ethernet1", "alias"}, + updateVal: pb.TypedValue{ + Value: &pb.TypedValue_StringVal{"1"}, + }, + }, + want: SimpleSResponse{ + target: "CONFIG_DB", + path: []string{"TELEMETRY_CLIENT", "DestinationGroup_TEST", "dst_addr"}, + op: int32(pb.UpdateResult_UPDATE), + }, + wantFV: tblFV{ + tbl: "TELEMETRY_CLIENT|DestinationGroup_TEST", + f: "dst_addr", + v: "20.20.20.20:8081", + }, + wantErr: "config [CONFIG_DB PORT Ethernet1 alias] not supported", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + rclient.FlushDb() + rclient.HSet("TELEMETRY_CLIENT|Global", "retry_interval", "30") + rclient.HSet("TELEMETRY_CLIENT|DestinationGroup_TEST", "dst_addr", "10.10.10.10:8081") + rclient.HSet("VLAN|Vlan20", "admin_status", "down") + rclient.HSet("VLAN|Vlan20", "mtu", "9180") + + request := newSetRequest(&test.in) + //t.Log("SetRequest: ", request) + resp, err := gClient.Set(ctx, request) + // Check return code + gotRetStatus, ok := status.FromError(err) + if !ok { + t.Fatal("got a non-grpc error from grpc call") + } + if (len(test.wantErr) == 0 && gotRetStatus.Code() != codes.OK) || + (len(test.wantErr) != 0 && gotRetStatus.Message() != test.wantErr) { + t.Log("err: ", err) + t.Fatalf("got return code %v, want %v", gotRetStatus.Message(), test.wantErr) + } + + if len(test.wantErr) == 0 { + if resp == nil { + t.Fatal("got SetResponse nil") + } + if !compareSetResponse(resp, &test.want) { + t.Errorf("SetResponse is not expected: %v", resp) + t.Logf("want: %v", test.want) + } + + // Check field value in DB + if test.wantFV.f != "" { + val, _ := rclient.HGet(test.wantFV.tbl, test.wantFV.f).Result() + if val != test.wantFV.v { + t.Errorf("got (%v) != wantFV.v (%v)", val, test.wantFV.v) + } + } else { + val, _ := rclient.HGetAll(test.wantFV.tbl).Result() + if !reflect.DeepEqual(val, test.wantFV.v) { + t.Errorf("got (%v) != wantFV.v (%v)", val, test.wantFV.v) + } + } + } + }) + } + 
s.s.Stop() +} + func init() { // Inform gNMI server to use redis tcp localhost connection sdc.UseRedisLocalTcpPort = true diff --git a/sonic_data_client/bgp.go b/sonic_data_client/bgp.go new file mode 100644 index 000000000..cb8aa5cae --- /dev/null +++ b/sonic_data_client/bgp.go @@ -0,0 +1,87 @@ +package client + +import ( + "strings" + + log "github.com/golang/glog" +) + +type Bgpinfo struct { + Version string + AsNumber string + MsgRcvd string + MsgSent string + TblVer string + InQ string + OutQ string + UpDown string + State_PfxRcd string +} + +func GetIprouteNum() ([]byte, error) { + strout, err := getCommandOut("docker exec -i bgp vtysh -c \"show ip route summary\"") + if err != nil { + log.V(2).Infof("show ip route summary error %v", err) + return nil, err + } + stringlist := strings.Split(strout, "\n") + data := make(map[string]string) + for i := 0; strings.Split(stringlist[i], " ")[0] != ""; i++ { + if strings.Fields(stringlist[i])[0] == "Totals" { + data["Routes"] = strings.Fields(stringlist[i])[1] + data["FIB"] = strings.Fields(stringlist[i])[2] + } + } + return marshal(data) +} + +func GetPrefixNum() ([]byte, error) { + strout, err := getCommandOut("docker exec -i bgp vtysh -c \"show ip route summary prefix\"") + if err != nil { + log.V(2).Infof("show ip route summary prefix error %v", err) + return nil, err + } + stringlist := strings.Split(strout, "\n") + data := make(map[string]string) + for i := 0; strings.Split(stringlist[i], " ")[0] != ""; i++ { + if strings.Fields(stringlist[i])[0] == "Totals" { + data["PrefixRoutes"] = strings.Fields(stringlist[i])[1] + data["FIB"] = strings.Fields(stringlist[i])[2] + } + } + return marshal(data) +} + +func BgpSummary() ([]byte, error) { + strout, err := getCommandOut("docker exec -i bgp vtysh -c \"show ip bgp summary\"") + if err != nil { + log.V(2).Infof("show ip bgp summary error %v", err) + return nil, err + } + stringlist := strings.Split(strout, "\n") + data := make(map[string]Bgpinfo) + findbgpflag := 
false + bgpinfoindex := 0 + for i := 0; i < len(stringlist); i++ { + if strings.Split(stringlist[i], " ")[0] == "Neighbor" { + bgpinfoindex = i + 1 + findbgpflag = true + break + } + } + if findbgpflag == true { + for i := bgpinfoindex; strings.Split(stringlist[i], " ")[0] != ""; i++ { + data[strings.Fields(stringlist[i])[0]] = Bgpinfo{ + strings.Fields(stringlist[i])[1], + strings.Fields(stringlist[i])[2], + strings.Fields(stringlist[i])[3], + strings.Fields(stringlist[i])[4], + strings.Fields(stringlist[i])[5], + strings.Fields(stringlist[i])[6], + strings.Fields(stringlist[i])[7], + strings.Fields(stringlist[i])[8], + strings.Fields(stringlist[i])[9]} + } + } + return marshal(data) +} diff --git a/sonic_data_client/db_client.go b/sonic_data_client/db_client.go index 3ea813f8c..7d560653d 100644 --- a/sonic_data_client/db_client.go +++ b/sonic_data_client/db_client.go @@ -2,10 +2,10 @@ package client import ( - "bytes" "encoding/json" "fmt" "net" + "os" "reflect" "strconv" "strings" @@ -44,6 +44,8 @@ type Client interface { PollRun(q *queue.PriorityQueue, poll chan struct{}, w *sync.WaitGroup) // Get return data from the data source in format of *spb.Value Get(w *sync.WaitGroup) ([]*spb.Value, error) + // Set table field from pb path and val + Set(path *gnmipb.Path, val interface{}, replaceflag bool) error // Close provides implemenation for explicit cleanup of Client Close() error } @@ -59,22 +61,6 @@ var UseRedisLocalTcpPort bool = false // redis client connected to each DB var Target2RedisDb = make(map[string]*redis.Client) -type tablePath struct { - dbName string - tableName string - tableKey string - delimitor string - field string - // path name to be used in json data which may be different - // from the real data path. Ex. in Counters table, real tableKey - // is oid:0x####, while key name like Ethernet## may be put - // in json data. 
They are to be filled in populateDbtablePath() - jsonTableName string - jsonTableKey string - jsonDelimitor string - jsonField string -} - type Value struct { *spb.Value } @@ -113,11 +99,7 @@ func NewDbClient(paths []*gnmipb.Path, prefix *gnmipb.Path) (Client, error) { useRedisTcpClient() } if prefix.GetTarget() == "COUNTERS_DB" { - err = initCountersPortNameMap() - if err != nil { - return nil, err - } - err = initCountersQueueNameMap() + err = initCountersNameMap() if err != nil { return nil, err } @@ -125,7 +107,10 @@ func NewDbClient(paths []*gnmipb.Path, prefix *gnmipb.Path) (Client, error) { client.prefix = prefix client.pathG2S = make(map[*gnmipb.Path][]tablePath) - err = populateAllDbtablePath(prefix, paths, &client.pathG2S) + for _, path := range paths { + fp := gnmiFullPath(prefix, path) + client.pathG2S[fp] = nil + } if err != nil { return nil, err @@ -147,8 +132,14 @@ func (c *DbClient) StreamRun(q *queue.PriorityQueue, stop chan struct{}, w *sync c.q = q c.channel = stop + err := getAllTablePath(c.pathG2S, true) + if err != nil { + log.V(1).Infof("get table path fail %v", err) + return + } + for gnmiPath, tblPaths := range c.pathG2S { - if tblPaths[0].field != "" { + if tblPaths[0].fields != "" { c.w.Add(1) c.synced.Add(1) if len(tblPaths) > 1 { @@ -199,6 +190,11 @@ func (c *DbClient) PollRun(q *queue.PriorityQueue, poll chan struct{}, w *sync.W return } t1 := time.Now() + err := getAllTablePath(c.pathG2S, false) + if err != nil { + log.V(1).Infof("get table path fail %v", err) + return + } for gnmiPath, tblPaths := range c.pathG2S { val, err := tableData2TypedValue(tblPaths, nil) if err != nil { @@ -231,6 +227,19 @@ func (c *DbClient) Get(w *sync.WaitGroup) ([]*spb.Value, error) { // wait sync for Get, not used for now c.w = w + err := getAllTablePath(c.pathG2S, false) + if err != nil { + log.V(2).Infof("get table path fail") + return nil, err + } + + deviceName, err := os.Hostname() + if err != nil { + log.V(2).Infof("Get hostname error: %v", err) + 
} else { + c.prefix.Origin = deviceName + } + var values []*spb.Value ts := time.Now() for gnmiPath, tblPaths := range c.pathG2S { @@ -250,6 +259,130 @@ func (c *DbClient) Get(w *sync.WaitGroup) ([]*spb.Value, error) { log.V(4).Infof("Get done, total time taken: %v ms", int64(time.Since(ts)/time.Millisecond)) return values, nil } +func fsetconfigdb(redisDb *redis.Client, key string, val map[string]interface{}, replaceflag bool) error { + newfv := val + oldfv, err := redisDb.HGetAll(key).Result() + if err != nil { + log.V(2).Infof("redis HGetALL failed for %v", key) + return err + } + if replaceflag == true { + for f, v := range oldfv { + vv, ok := newfv[f] + if ok { + if vv != v { + _, err := redisDb.HSet(key, f, vv).Result() + if err != nil { + log.V(2).Infof("redis HSet failed for %v %v", key, f) + } + } + delete(newfv, f) + } else { + _, err := redisDb.HDel(key, f).Result() + if err != nil { + log.V(2).Infof("redis HDel failed for %v %v", key, f) + return err + } + } + } + + for f, v := range newfv { + _, err := redisDb.HSet(key, f, v).Result() + if err != nil { + log.V(2).Infof("redis HSet failed for %v %v", key, f) + } + } + } else { + for f, v := range newfv { + _, err := redisDb.HSet(key, f, v).Result() + if err != nil { + log.V(2).Infof("redis HSet failed for %v %v", key, f) + } + } + } + return nil +} +func (c *DbClient) Set(path *gnmipb.Path, val interface{}, replaceflag bool) error { + fp := gnmiFullPath(c.prefix, path) + gsp, err := newGSPath(fp) + if err != nil { + return err + } + err = gsp.GetCfgPath() + if err != nil { + return err + } + + tblPaths := gsp.tpath + log.V(6).Infof("Set: path %v", tblPaths) + for _, tp := range tblPaths { + redisDb := Target2RedisDb[tp.dbName] + var key string + if tp.tableKey != "" { + key = tp.tableName + tp.delimitor + tp.tableKey + } else { + key = tp.tableName + } + + log.V(6).Infof("Set: key %v fields %v val %v", key, tp.fields, val) + if tp.fields != "" { + switch val.(type) { + case string: + if val == "" { + _, 
err := redisDb.HDel(key, tp.fields).Result() + if err != nil { + log.V(2).Infof("redis HDel failed for %v", tp) + return err + } + } else { + _, err := redisDb.HSet(key, tp.fields, val).Result() + if err != nil { + log.V(2).Infof("redis HSet failed for %v", tp) + return err + } + } + default: + return fmt.Errorf("Set key %v fields %v val %v : type isn't string", key, tp.fields, val) + } + } else { + switch val.(type) { + case map[string]string: + newfvtemp := val.(map[string]string) + newfv := make(map[string]interface{}) + for ktemp, vtemp := range newfvtemp { + newfv[ktemp] = interface{}(vtemp) + } + err := fsetconfigdb(redisDb, key, newfv, replaceflag) + if err != nil { + log.V(2).Infof("setconfigdb failed for %v %v", key, newfv) + return err + } + case map[string]map[string]interface{}: + newfv := val.(map[string]map[string]interface{}) + for f1, v1 := range newfv { + key = tp.tableName + tp.delimitor + f1 + err := fsetconfigdb(redisDb, key, v1, replaceflag) + if err != nil { + log.V(2).Infof("setconfigdb failed for %v %v", key, newfv) + return err + } + } + case string: + if val == "" { + newfv := map[string]interface{}{} + err := fsetconfigdb(redisDb, key, newfv, replaceflag) + if err != nil { + log.V(2).Infof("setconfigdb failed for %v %v", key, newfv) + return err + } + } + default: + return fmt.Errorf("Set key %v val %v type isn't map[string]string", key, val) + } + } + } + return nil +} // TODO: Log data related to this session func (c *DbClient) Close() error { @@ -349,8 +482,9 @@ func init() { // gnmiFullPath builds the full path from the prefix and path. func gnmiFullPath(prefix, path *gnmipb.Path) *gnmipb.Path { - - fullPath := &gnmipb.Path{Origin: path.Origin} + fullPath := &gnmipb.Path{ + Origin: path.Origin, + Target: prefix.GetTarget()} if path.GetElement() != nil { fullPath.Element = append(prefix.GetElement(), path.GetElement()...) 
} @@ -360,125 +494,22 @@ func gnmiFullPath(prefix, path *gnmipb.Path) *gnmipb.Path { return fullPath } -func populateAllDbtablePath(prefix *gnmipb.Path, paths []*gnmipb.Path, pathG2S *map[*gnmipb.Path][]tablePath) error { - for _, path := range paths { - err := populateDbtablePath(prefix, path, pathG2S) - if err != nil { - return err - } - } - return nil -} - -// Populate table path in DB from gnmi path -func populateDbtablePath(prefix, path *gnmipb.Path, pathG2S *map[*gnmipb.Path][]tablePath) error { - var buffer bytes.Buffer - var dbPath string - var tblPath tablePath - - target := prefix.GetTarget() - // Verify it is a valid db name - redisDb, ok := Target2RedisDb[target] - if !ok { - return fmt.Errorf("Invalid target name %v", target) - } - - fullPath := path - if prefix != nil { - fullPath = gnmiFullPath(prefix, path) - } - - stringSlice := []string{target} - separator, _ := GetTableKeySeparator(target) - elems := fullPath.GetElem() - if elems != nil { - for i, elem := range elems { - // TODO: Usage of key field - log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) - if i != 0 { - buffer.WriteString(separator) +func getAllTablePath(pathG2S map[*gnmipb.Path][]tablePath, allowNotFound bool) error { + for path, tbPath := range pathG2S { + if nil == tbPath { + gsp, err := newGSPath(path) + if err != nil { + return err + } + err = gsp.GetDbPath(allowNotFound) + if err != nil { + return err + } + if gsp.tpath != nil { + pathG2S[path] = gsp.tpath } - buffer.WriteString(elem.GetName()) - stringSlice = append(stringSlice, elem.GetName()) - } - dbPath = buffer.String() - } - - // First lookup the Virtual path to Real path mapping tree - // The path from gNMI might not be real db path - if tblPaths, err := lookupV2R(stringSlice); err == nil { - (*pathG2S)[path] = tblPaths - log.V(5).Infof("v2r from %v to %+v ", stringSlice, tblPaths) - return nil - } else { - log.V(5).Infof("v2r lookup failed for %v %v", stringSlice, err) - } - - tblPath.dbName = 
target - tblPath.tableName = stringSlice[1] - tblPath.delimitor = separator - - var mappedKey string - if len(stringSlice) > 2 { // tmp, to remove mappedKey - mappedKey = stringSlice[2] - } - - // The expect real db path could be in one of the formats: - // <1> DB Table - // <2> DB Table Key - // <3> DB Table Field - // <4> DB Table Key Field - // <5> DB Table Key Key Field - switch len(stringSlice) { - case 2: // only table name provided - res, err := redisDb.Keys(tblPath.tableName + "*").Result() - if err != nil || len(res) < 1 { - log.V(2).Infof("Invalid db table Path %v %v", target, dbPath) - return fmt.Errorf("Failed to find %v %v %v %v", target, dbPath, err, res) - } - tblPath.tableKey = "" - case 3: // Third element could be table key; or field name in which case table name itself is the key too - n, err := redisDb.Exists(tblPath.tableName + tblPath.delimitor + mappedKey).Result() - if err != nil { - return fmt.Errorf("redis Exists op failed for %v", dbPath) - } - if n == 1 { - tblPath.tableKey = mappedKey - } else { - tblPath.field = mappedKey - } - case 4: // Fourth element could part of the table key or field name - tblPath.tableKey = mappedKey + tblPath.delimitor + stringSlice[3] - // verify whether this key exists - key := tblPath.tableName + tblPath.delimitor + tblPath.tableKey - n, err := redisDb.Exists(key).Result() - if err != nil { - return fmt.Errorf("redis Exists op failed for %v", dbPath) - } - if n != 1 { // Looks like the Fourth slice is not part of the key - tblPath.tableKey = mappedKey - tblPath.field = stringSlice[3] - } - case 5: // both third and fourth element are part of table key, fourth element must be field name - tblPath.tableKey = mappedKey + tblPath.delimitor + stringSlice[3] - tblPath.field = stringSlice[4] - default: - log.V(2).Infof("Invalid db table Path %v", dbPath) - return fmt.Errorf("Invalid db table Path %v", dbPath) - } - - var key string - if tblPath.tableKey != "" { - key = tblPath.tableName + tblPath.delimitor + 
tblPath.tableKey - n, _ := redisDb.Exists(key).Result() - if n != 1 { - log.V(2).Infof("No valid entry found on %v with key %v", dbPath, key) - return fmt.Errorf("No valid entry found on %v with key %v", dbPath, key) } } - - (*pathG2S)[path] = []tablePath{tblPath} - log.V(5).Infof("tablePath %+v", tblPath) return nil } @@ -531,7 +562,7 @@ func tableData2Msi(tblPath *tablePath, useKey bool, op *string, msi *map[string] var pattern string var dbkeys []string var err error - var fv map[string]string + fv := map[string]string{} //Only table name provided if tblPath.tableKey == "" { @@ -551,21 +582,28 @@ func tableData2Msi(tblPath *tablePath, useKey bool, op *string, msi *map[string] dbkeys = []string{tblPath.tableName + tblPath.delimitor + tblPath.tableKey} } - // Asked to use jsonField and jsonTableKey in the final json value - if tblPath.jsonField != "" && tblPath.jsonTableKey != "" { - val, err := redisDb.HGet(dbkeys[0], tblPath.field).Result() - if err != nil { - log.V(3).Infof("redis HGet failed for %v %v", tblPath, err) - // ignore non-existing field which was derived from virtual path - return nil + // Asked to use jsonFields and fields in the final json value + if tblPath.fields != "" && tblPath.jsonFields != "" { + fs := strings.Split(tblPath.fields, ",") + for _, f := range fs { + val, err := redisDb.HGet(dbkeys[0], f).Result() + if err != nil { + log.V(3).Infof("redis HGet failed for %v %v", tblPath, err) + // ignore non-existing field which was derived from virtual path + return nil + } + fv[f] = val } - fv = map[string]string{tblPath.jsonField: val} - makeJSON_redis(msi, &tblPath.jsonTableKey, op, fv) log.V(6).Infof("Added json key %v fv %v ", tblPath.jsonTableKey, fv) + if tblPath.jsonTableKey != "" { + makeJSON_redis(msi, &tblPath.jsonTableKey, op, fv) + } else { + makeJSON_redis(msi, nil, op, fv) + } return nil } - for idx, dbkey := range dbkeys { + for _, dbkey := range dbkeys { fv, err = redisDb.HGetAll(dbkey).Result() if err != nil { 
log.V(2).Infof("redis HGetAll failed for %v, dbkey %s", tblPath, dbkey) @@ -587,7 +625,7 @@ func tableData2Msi(tblPath *tablePath, useKey bool, op *string, msi *map[string] log.V(2).Infof("makeJSON err %s for fv %v", err, fv) return err } - log.V(6).Infof("Added idex %v fv %v ", idx, fv) + log.V(6).Infof("Added dbkey %v fv %v ", dbkey, fv) } return nil } @@ -610,12 +648,17 @@ func tableData2TypedValue(tblPaths []tablePath, op *string) (*gnmipb.TypedValue, for _, tblPath := range tblPaths { redisDb := Target2RedisDb[tblPath.dbName] - if tblPath.jsonField == "" { // Not asked to include field in json value, which means not wildcard query + if tblPath.jsonFields == "" { // Not asked to include field in json value, which means not wildcard query // table path includes table, key and field - if tblPath.field != "" { + if tblPath.fields != "" { if len(tblPaths) != 1 { log.V(2).Infof("WARNING: more than one path exists for field granularity query: %v", tblPaths) } + if strings.Contains(tblPath.fields, ",") { + log.V(2).Infof("multiple fields need jsonFields not empty: %v", tblPath.fields) + return nil, fmt.Errorf("Invalid field %v", tblPath.fields) + } + var key string if tblPath.tableKey != "" { key = tblPath.tableName + tblPath.delimitor + tblPath.tableKey @@ -623,7 +666,7 @@ func tableData2TypedValue(tblPaths []tablePath, op *string) (*gnmipb.TypedValue, key = tblPath.tableName } - val, err := redisDb.HGet(key, tblPath.field).Result() + val, err := redisDb.HGet(key, tblPath.fields).Result() if err != nil { log.V(2).Infof("redis HGet failed for %v", tblPath) return nil, err @@ -676,6 +719,12 @@ func dbFieldMultiSubscribe(gnmiPath *gnmipb.Path, c *DbClient) { msi := make(map[string]interface{}) for _, tblPath := range tblPaths { var key string + + // TODO: support multiple fields for stream mode + if strings.Contains(tblPath.fields, ",") { + log.V(2).Infof("multiple fields not support here: %v", tblPath.fields) + continue + } if tblPath.tableKey != "" { key = 
tblPath.tableName + tblPath.delimitor + tblPath.tableKey } else { @@ -683,26 +732,26 @@ func dbFieldMultiSubscribe(gnmiPath *gnmipb.Path, c *DbClient) { } // run redis get directly for field value redisDb := Target2RedisDb[tblPath.dbName] - val, err := redisDb.HGet(key, tblPath.field).Result() + val, err := redisDb.HGet(key, tblPath.fields).Result() if err == redis.Nil { - if tblPath.jsonField != "" { + if tblPath.jsonFields != "" { // ignore non-existing field which was derived from virtual path continue } - log.V(2).Infof("%v doesn't exist with key %v in db", tblPath.field, key) - enqueFatalMsg(c, fmt.Sprintf("%v doesn't exist with key %v in db", tblPath.field, key)) + log.V(2).Infof("%v doesn't exist with key %v in db", tblPath.fields, key) + enqueFatalMsg(c, fmt.Sprintf("%v doesn't exist with key %v in db", tblPath.fields, key)) return } if err != nil { - log.V(1).Infof(" redis HGet error on %v with key %v", tblPath.field, key) - enqueFatalMsg(c, fmt.Sprintf(" redis HGet error on %v with key %v", tblPath.field, key)) + log.V(1).Infof(" redis HGet error on %v with key %v", tblPath.fields, key) + enqueFatalMsg(c, fmt.Sprintf(" redis HGet error on %v with key %v", tblPath.fields, key)) return } if val == path2ValueMap[tblPath] { continue } path2ValueMap[tblPath] = val - fv := map[string]string{tblPath.jsonField: val} + fv := map[string]string{tblPath.jsonFields: val} msi[tblPath.jsonTableKey] = fv log.V(6).Infof("new value %v for %v", val, tblPath) } @@ -754,6 +803,12 @@ func dbFieldSubscribe(gnmiPath *gnmipb.Path, c *DbClient) { key = tblPath.tableName } + // TODO: support multiple fields for stream mode + if strings.Contains(tblPath.fields, ",") { + log.V(2).Infof("multiple fields not support here: %v", tblPath.fields) + return + } + var val string for { select { @@ -761,15 +816,15 @@ func dbFieldSubscribe(gnmiPath *gnmipb.Path, c *DbClient) { log.V(1).Infof("Stopping dbFieldSubscribe routine for Client %s ", c) return default: - newVal, err := redisDb.HGet(key, 
tblPath.field).Result() + newVal, err := redisDb.HGet(key, tblPath.fields).Result() if err == redis.Nil { - log.V(2).Infof("%v doesn't exist with key %v in db", tblPath.field, key) - enqueFatalMsg(c, fmt.Sprintf("%v doesn't exist with key %v in db", tblPath.field, key)) + log.V(2).Infof("%v doesn't exist with key %v in db", tblPath.fields, key) + enqueFatalMsg(c, fmt.Sprintf("%v doesn't exist with key %v in db", tblPath.fields, key)) return } if err != nil { - log.V(1).Infof(" redis HGet error on %v with key %v", tblPath.field, key) - enqueFatalMsg(c, fmt.Sprintf(" redis HGet error on %v with key %v", tblPath.field, key)) + log.V(1).Infof(" redis HGet error on %v with key %v", tblPath.fields, key) + enqueFatalMsg(c, fmt.Sprintf(" redis HGet error on %v with key %v", tblPath.fields, key)) return } if newVal != val { @@ -859,7 +914,7 @@ func dbSingleTableKeySubscribe(rsd redisSubData, c *DbClient, msiOut *map[string continue } tblPath.tableKey = subscr.Channel[prefixLen:] - err = tableData2Msi(&tblPath, false, nil, &newMsi) + err = tableData2Msi(&tblPath, true, nil, &newMsi) if err != nil { enqueFatalMsg(c, err.Error()) return @@ -982,6 +1037,7 @@ func dbTableKeySubscribe(gnmiPath *gnmipb.Path, c *DbClient) { } if val != nil { spbv = &spb.Value{ + Prefix: c.prefix, Path: gnmiPath, Timestamp: time.Now().UnixNano(), Val: val, diff --git a/sonic_data_client/non_db_client.go b/sonic_data_client/non_db_client.go index a42f5956d..6c6a1c0bf 100644 --- a/sonic_data_client/non_db_client.go +++ b/sonic_data_client/non_db_client.go @@ -1,10 +1,8 @@ package client import ( - "encoding/json" "fmt" spb "github.com/Azure/sonic-telemetry/proto" - linuxproc "github.com/c9s/goprocinfo/linux" log "github.com/golang/glog" gnmipb "github.com/openconfig/gnmi/proto/gnmi" "github.com/workiva/go-datastructures/queue" @@ -15,10 +13,6 @@ import ( // Non db client is to Handle // <1> data not in SONiC redis db -const ( - statsRingCap uint64 = 3000 // capacity of statsRing. 
-) - type dataGetFunc func() ([]byte, error) type path2DataFunc struct { @@ -26,42 +20,55 @@ type path2DataFunc struct { getFunc dataGetFunc } -type statsRing struct { - writeIdx uint64 // slot index to write next - buff []*linuxproc.Stat - mu sync.RWMutex // Mutex for data protection -} - var ( clientTrie *Trie - statsR statsRing // path2DataFuncTbl is used to populate trie tree which is reponsible // for getting data at the path specified path2DataFuncTbl = []path2DataFunc{ - { // Get cpu utilizaation - path: []string{"OTHERS", "platform", "cpu"}, - getFunc: dataGetFunc(getCpuUtil), + { // Get system cpu usage + path: []string{"OTHERS", "system", "cpu"}, + getFunc: dataGetFunc(GetCpuUtil), + }, + { // Get system vmstat + path: []string{"OTHERS", "system", "memory"}, + getFunc: dataGetFunc(GetMemInfo), + }, + { // Get system disk stat + path: []string{"OTHERS", "system", "disk"}, + getFunc: dataGetFunc(GetDiskUsage), + }, + { // Get system version + path: []string{"OTHERS", "system", "version"}, + getFunc: dataGetFunc(GetVersion), + }, + { // Get system ntpstat + path: []string{"OTHERS", "system", "ntp"}, + getFunc: dataGetFunc(GetNtpStat), + }, + { // Get system down reason + path: []string{"OTHERS", "system", "down"}, + getFunc: dataGetFunc(GetDownReason), }, - { // Get proc meminfo - path: []string{"OTHERS", "proc", "meminfo"}, - getFunc: dataGetFunc(getProcMeminfo), + { // Get bgp summary + path: []string{"OTHERS", "bgp", "summary"}, + getFunc: dataGetFunc(BgpSummary), }, - { // Get proc diskstats - path: []string{"OTHERS", "proc", "diskstats"}, - getFunc: dataGetFunc(getProcDiskstats), + { // Get iproute num + path: []string{"OTHERS", "iproute", "num"}, + getFunc: dataGetFunc(GetIprouteNum), }, - { // Get proc loadavg - path: []string{"OTHERS", "proc", "loadavg"}, - getFunc: dataGetFunc(getProcLoadavg), + { // Get iproute num + path: []string{"OTHERS", "prefix", "num"}, + getFunc: dataGetFunc(GetPrefixNum), }, - { // Get proc vmstat - path: []string{"OTHERS", 
"proc", "vmstat"}, - getFunc: dataGetFunc(getProcVmstat), + { // Get iproute num + path: []string{"OTHERS", "interface", "rate"}, + getFunc: dataGetFunc(GetRxTxRate), }, - { // Get proc stat - path: []string{"OTHERS", "proc", "stat"}, - getFunc: dataGetFunc(getProcStat), + { // Get iproute num + path: []string{"OTHERS", "configdb", "all"}, + getFunc: dataGetFunc(GetConfigdb), }, } ) @@ -74,202 +81,12 @@ func (t *Trie) clientTriePopulate() { } else { log.V(2).Infof("Add trie node for %v with %v", pt.path, pt.getFunc) } - - } -} - -type cpuStat struct { - CpuUsageAll cpuUtil `json:"cpu_all"` - CpuUsage []cpuUtil `json:"cpus"` -} - -// Cpu utilization rate -type cpuUtil struct { - Id string `json:"id"` - CpuUtil_100ms uint64 `json:"100ms"` - CpuUtil_1s uint64 `json:"1s"` - CpuUtil_5s uint64 `json:"5s"` - CpuUtil_1min uint64 `json:"1min"` - CpuUtil_5min uint64 `json:"5min"` -} - -func getCpuUtilPercents(cur, last *linuxproc.CPUStat) uint64 { - curTotal := (cur.User + cur.Nice + cur.System + cur.Idle + cur.IOWait + cur.IRQ + cur.SoftIRQ + cur.Steal + cur.Guest + cur.GuestNice) - lastTotal := (last.User + last.Nice + last.System + last.Idle + last.IOWait + last.IRQ + last.SoftIRQ + last.Steal + last.Guest + last.GuestNice) - idleTicks := cur.Idle - last.Idle - totalTicks := curTotal - lastTotal - return 100 * (totalTicks - idleTicks) / totalTicks -} - -func getCpuUtilStat() *cpuStat { - - stat := cpuStat{} - statsR.mu.RLock() - defer statsR.mu.RUnlock() - - current := (statsR.writeIdx + statsRingCap - 1) % statsRingCap - // Get cpu utilization rate within last 100ms - last := (statsR.writeIdx + statsRingCap - 2) % statsRingCap - if statsR.buff[last] == nil { - return &stat - } - - curCpuStat := statsR.buff[current].CPUStatAll - lastCpuStat := statsR.buff[last].CPUStatAll - - CpuUtil_100ms := getCpuUtilPercents(&curCpuStat, &lastCpuStat) - stat.CpuUsageAll.Id = curCpuStat.Id - stat.CpuUsageAll.CpuUtil_100ms = CpuUtil_100ms - for i, cStat := range 
statsR.buff[last].CPUStats { - CpuUtil_100ms = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) - stat.CpuUsage = append(stat.CpuUsage, cpuUtil{Id: cStat.Id, CpuUtil_100ms: CpuUtil_100ms}) - } - - // Get cpu utilization rate within last 1s (10*100ms) - last = (statsR.writeIdx + statsRingCap - 10) % statsRingCap - if statsR.buff[last] == nil { - return &stat - } - lastCpuStat = statsR.buff[last].CPUStatAll - CpuUtil_1s := getCpuUtilPercents(&curCpuStat, &lastCpuStat) - stat.CpuUsageAll.CpuUtil_1s = CpuUtil_1s - for i, cStat := range statsR.buff[last].CPUStats { - CpuUtil_1s = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) - stat.CpuUsage[i].CpuUtil_1s = CpuUtil_1s - } - - // Get cpu utilization rate within last 5s (50*100ms) - last = (statsR.writeIdx + statsRingCap - 50) % statsRingCap - if statsR.buff[last] == nil { - return &stat - } - lastCpuStat = statsR.buff[last].CPUStatAll - CpuUtil_5s := getCpuUtilPercents(&curCpuStat, &lastCpuStat) - stat.CpuUsageAll.CpuUtil_5s = CpuUtil_5s - for i, cStat := range statsR.buff[last].CPUStats { - CpuUtil_5s = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) - stat.CpuUsage[i].CpuUtil_5s = CpuUtil_5s - } - - // Get cpu utilization rate within last 1m (600*100ms) - last = (statsR.writeIdx + statsRingCap - 600) % statsRingCap - if statsR.buff[last] == nil { - return &stat - } - lastCpuStat = statsR.buff[last].CPUStatAll - CpuUtil_1min := getCpuUtilPercents(&curCpuStat, &lastCpuStat) - stat.CpuUsageAll.CpuUtil_1min = CpuUtil_1min - for i, cStat := range statsR.buff[last].CPUStats { - CpuUtil_1min = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) - stat.CpuUsage[i].CpuUtil_1min = CpuUtil_1min - } - - // Get cpu utilization rate within last 5m (5*600*100ms) - last = (statsR.writeIdx + statsRingCap - 30000) % statsRingCap - if statsR.buff[last] == nil { - return &stat - } - lastCpuStat = statsR.buff[last].CPUStatAll - CpuUtil_5min := getCpuUtilPercents(&curCpuStat, &lastCpuStat) 
- stat.CpuUsageAll.CpuUtil_5min = CpuUtil_5min - for i, cStat := range statsR.buff[last].CPUStats { - CpuUtil_5min = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) - stat.CpuUsage[i].CpuUtil_5min = CpuUtil_5min - } - return &stat -} - -func getCpuUtil() ([]byte, error) { - cpuStat := getCpuUtilStat() - log.V(4).Infof("getCpuUtil, cpuStat %v", cpuStat) - b, err := json.Marshal(cpuStat) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getCpuUtil, output %v", string(b)) - return b, nil -} - -func getProcMeminfo() ([]byte, error) { - memInfo, _ := linuxproc.ReadMemInfo("/proc/meminfo") - b, err := json.Marshal(memInfo) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getProcMeminfo, output %v", string(b)) - return b, nil -} - -func getProcDiskstats() ([]byte, error) { - diskStats, _ := linuxproc.ReadDiskStats("/proc/diskstats") - b, err := json.Marshal(diskStats) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getProcDiskstats, output %v", string(b)) - return b, nil -} - -func getProcLoadavg() ([]byte, error) { - loadAvg, _ := linuxproc.ReadLoadAvg("/proc/loadavg") - b, err := json.Marshal(loadAvg) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getProcLoadavg, output %v", string(b)) - return b, nil -} - -func getProcVmstat() ([]byte, error) { - vmStat, _ := linuxproc.ReadVMStat("/proc/vmstat") - b, err := json.Marshal(vmStat) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getProcVmstat, output %v", string(b)) - return b, nil -} - -func getProcStat() ([]byte, error) { - stat, _ := linuxproc.ReadStat("/proc/stat") - b, err := json.Marshal(stat) - if err != nil { - log.V(2).Infof("%v", err) - return b, err - } - log.V(4).Infof("getProcStat, output %v", string(b)) - return b, nil -} - -func pollStats() { - for { - stat, err := linuxproc.ReadStat("/proc/stat") - if err != nil { - 
log.V(2).Infof("stat read fail") - continue - } - - statsR.mu.Lock() - - statsR.buff[statsR.writeIdx] = stat - statsR.writeIdx++ - statsR.writeIdx %= statsRingCap - statsR.mu.Unlock() - time.Sleep(time.Millisecond * 100) } - } func init() { clientTrie = NewTrie() clientTrie.clientTriePopulate() - statsR.buff = make([]*linuxproc.Stat, statsRingCap) - go pollStats() } type NonDbClient struct { @@ -405,6 +222,10 @@ func (c *NonDbClient) Get(w *sync.WaitGroup) ([]*spb.Value, error) { return values, nil } +func (c *NonDbClient) Set(path *gnmipb.Path, val interface{}, replaceflag bool) error { + return nil +} + // TODO: Log data related to this session func (c *NonDbClient) Close() error { return nil diff --git a/sonic_data_client/path.go b/sonic_data_client/path.go new file mode 100644 index 000000000..c164bd8e9 --- /dev/null +++ b/sonic_data_client/path.go @@ -0,0 +1,222 @@ +package client + +import ( + "fmt" + spb "github.com/Azure/sonic-telemetry/proto" + log "github.com/golang/glog" + gnmipb "github.com/openconfig/gnmi/proto/gnmi" +) + +var ( + cfgPermit = [][]string{ + []string{"CONFIG_DB", "TELEMETRY_CLIENT"}, + []string{"CONFIG_DB", "VLAN"}, + []string{"CONFIG_DB", "VLAN_MEMBER"}, + []string{"CONFIG_DB", "VLAN_INTERFACE"}, + []string{"CONFIG_DB", "BGP_NETWORK"}, + []string{"CONFIG_DB", "PORT", "*", "admin_status"}, + } +) + +type tablePath struct { + dbName string + tableName string + tableKey string + delimitor string + fields string + // path name to be used in json data which may be different + // from the real data path. Ex. in Counters table, real tableKey + // is oid:0x####, while key name like Ethernet## may be put + // in json data. 
+ jsonTableName string + jsonTableKey string + jsonFields string +} + +type GSPath struct { + gpath []string // path string from gNMI path + tpath []tablePath // table path for SONiC DB +} + +// newGSPath construct new GSPath by gNMI path +func newGSPath(path *gnmipb.Path) (*GSPath, error) { + elems := path.GetElem() + if elems == nil { + log.V(2).Infof("empty path: %v", elems) + return nil, fmt.Errorf("empty path") + } + + if len(path.GetTarget()) == 0 { + return nil, fmt.Errorf("empty target") + } + gp := []string{path.GetTarget()} + for _, elem := range elems { + // TODO: Usage of key field + gp = append(gp, elem.GetName()) + } + log.V(6).Infof("path []string: %v", gp) + + return &GSPath{gpath: gp}, nil +} + +// GetDbPath return tablePath to get DB data +func (p *GSPath) GetDbPath(allowNotFound bool) error { + target := p.gpath[0] + if !isValidDbTarget(target) { + return fmt.Errorf("invaild db target: %v", target) + } + + if target == "COUNTERS_DB" { + tp, err := getv2rPath(p.gpath) + if err == nil { + p.tpath = tp + return nil + } + } + + rp, err := getTblPath(p.gpath, allowNotFound) + if err != nil { + return err + } + p.tpath = append(p.tpath, rp) + return nil +} + +// GetCfgpath check if path permit, return DB path to set value +func (p *GSPath) GetCfgPath() error { + target := p.gpath[0] + if !isValidDbTarget(target) { + return fmt.Errorf("invaild db target: %v", target) + } + + if target != "CONFIG_DB" { + return fmt.Errorf("config %s not supported", target) + } + + // Check if path permit + for _, s := range cfgPermit { + if pathPermit(p.gpath, s) { + rp, err := getTblPath(p.gpath, true) + if err != nil { + return err + } + p.tpath = append(p.tpath, rp) + return nil + } + } + + return fmt.Errorf("config %s not supported", p.gpath) +} + +func isValidDbTarget(t string) bool { + _, ok := spb.Target_value[t] + if t == "OTHERS" { + return false + } + return ok +} + +// getTblPath convert path string slice to real DB table path +func getTblPath(gp []string, 
allowNotFound bool) (tablePath, error) { + // not support only DB + if len(gp) < 2 { + return tablePath{}, fmt.Errorf("not support") + } + + target := gp[0] + separator, _ := GetTableKeySeparator(target) + tp := tablePath{ + dbName: target, + tableName: gp[1], + delimitor: separator, + } + + redisDb := Target2RedisDb[target] + + log.V(2).Infof("redisDb: %v tp: %v", redisDb, tp) + // The expect real db path could be in one of the formats: + // DB Table + // DB Table Key + // DB Table Key Key + // DB Table Key Field + // DB Table Key Key Field + var retError error + switch len(gp) { + case 2: // only table name provided + res, err := redisDb.Keys(tp.tableName + "*").Result() + if err != nil || len(res) < 1 { + log.V(2).Infof("Invalid db table Path %v %v", target, gp) + retError = fmt.Errorf("failed to find %v %v %v %v", target, gp, err, res) + } + tp.tableKey = "" + case 3: // Third element could be table key + _, err := redisDb.Exists(tp.tableName + tp.delimitor + gp[2]).Result() + if err != nil { + retError = fmt.Errorf("redis Exists op failed for %v", gp) + } + tp.tableKey = gp[2] + case 4: // Fourth element could part of the table key or field name + tp.tableKey = gp[2] + tp.delimitor + gp[3] + // verify whether this key exists + key := tp.tableName + tp.delimitor + tp.tableKey + n, err := redisDb.Exists(key).Result() + if err != nil { + retError = fmt.Errorf("redis Exists op failed for %v", gp) + } else if n != 1 { // Looks like the Fourth slice is not part of the key + tp.tableKey = gp[2] + tp.fields = gp[3] + } + case 5: // both third and fourth element are part of table key, fourth element must be field name + tp.tableKey = gp[2] + tp.delimitor + gp[3] + tp.fields = gp[4] + default: + log.V(2).Infof("Invalid db table Path %v", gp) + retError = fmt.Errorf("invalid db table Path %v", gp) + } + + if allowNotFound { + if nil != retError { + return tp, nil + } + } else { + if nil != retError { + return tablePath{}, retError + } + + var key string + if tp.tableKey 
!= "" { + key = tp.tableName + tp.delimitor + tp.tableKey + n, _ := redisDb.Exists(key).Result() + if n != 1 { + log.V(2).Infof("No valid entry found on %v with key %v", gp, key) + return tablePath{}, fmt.Errorf("no valid entry found on %v with key %v", gp, key) + } + } + } + log.V(6).Infof("get tablePath: %v", tp) + + return tp, nil +} + +// pathPermit check if path is a subset of permit path +func pathPermit(a, permit []string) bool { + if (a == nil) || (permit == nil) { + return false + } + + if len(a) < len(permit) { + return false + } + + b := a[:len(permit)] + for i, v := range b { + if permit[i] == "*" { + continue + } + if v != permit[i] { + return false + } + } + + return true +} diff --git a/sonic_data_client/path_test.go b/sonic_data_client/path_test.go new file mode 100644 index 000000000..70836d68c --- /dev/null +++ b/sonic_data_client/path_test.go @@ -0,0 +1,305 @@ +package client + +import ( + //"flag" + "github.com/go-redis/redis" + "testing" +) + +func TestGetDbPath(t *testing.T) { + //flag.Set("alsologtostderr", "true") + //flag.Set("v", "6") + //flag.Parse() + UseRedisLocalTcpPort = true + useRedisTcpClient() + rclient := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "localhost:6379", + Password: "", // no password set + DB: 4, + DialTimeout: 0, + }) + _, err := rclient.Ping().Result() + if err != nil { + t.Fatalf("failed to connect to redis server %v", err) + } + defer rclient.Close() + + var tests = []struct { + desc string + input []string + wantErr string + wantPath []tablePath + }{ + { + desc: "Invalid DB", + input: []string{"TEST_DB", "COUNTERS"}, + wantErr: "invaild db target: TEST_DB", + wantPath: nil, + }, + { + desc: "Others target", + input: []string{"OTHERS", "proc", "meminfo"}, + wantErr: "invaild db target: OTHERS", + wantPath: nil, + }, + { + desc: "CONFIG_DB without table", + input: []string{"CONFIG_DB"}, + wantErr: "not support", + wantPath: nil, + }, + { + desc: "CONFIG_DB invalid table", + input: 
[]string{"CONFIG_DB", "COUNTERS"}, + wantErr: "failed to find CONFIG_DB [CONFIG_DB COUNTERS] []", + wantPath: nil, + }, + { + desc: "CONFIG_DB PORT", + input: []string{"CONFIG_DB", "PORT"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "PORT", + delimitor: "|", + }, + }, + }, + { + desc: "CONFIG_DB PORT invalid key", + input: []string{"CONFIG_DB", "PORT", "Ethernet100"}, + wantErr: "no valid entry found on [CONFIG_DB PORT Ethernet100] with key PORT|Ethernet100", + wantPath: nil, + }, + { + desc: "CONFIG_DB PORT Ethernet1", + input: []string{"CONFIG_DB", "PORT", "Ethernet1"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "PORT", + delimitor: "|", + tableKey: "Ethernet1", + }, + }, + }, + { + desc: "CONFIG_DB PORT Ethernet1 alias", + input: []string{"CONFIG_DB", "PORT", "Ethernet1", "alias"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "PORT", + delimitor: "|", + tableKey: "Ethernet1", + fields: "alias", + }, + }, + }, + { + desc: "CONFIG_DB VLAN_MEMBER Vlan12 PortChannel2", + input: []string{"CONFIG_DB", "VLAN_MEMBER", "Vlan12", "PortChannel2"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "VLAN_MEMBER", + delimitor: "|", + tableKey: "Vlan12|PortChannel2", + }, + }, + }, + { + desc: "CONFIG_DB VLAN_MEMBER Vlan12 PortChannel2 tagging_mode", + input: []string{"CONFIG_DB", "VLAN_MEMBER", "Vlan12", "PortChannel2", "tagging_mode"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "VLAN_MEMBER", + delimitor: "|", + tableKey: "Vlan12|PortChannel2", + fields: "tagging_mode", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + rclient.FlushDb() + rclient.HSet("PORT|Ethernet1", "alias", "1") + rclient.HSet("PORT|Ethernet1", "fec", "rs") + rclient.HSet("PORT|Ethernet2", "alias", "2") + rclient.HSet("PORT|Ethernet2", "fec", 
"rs") + rclient.HSet("VLAN_MEMBER|Vlan12|PortChannel2", "tagging_mode", "tagged") + + gsPath := &GSPath{gpath: test.input} + err := gsPath.GetDbPath(false) + tp := gsPath.tpath + + if err != nil && test.wantErr != "" { + if err.Error() != test.wantErr { + t.Errorf("GetDbPath err (%v) != wantErr (%v)", err, test.wantErr) + } + } else { + if !(err == nil && test.wantErr == "") { + t.Errorf("GetDbPath err (%v) != wantErr (%v)", err, test.wantErr) + } + } + // This testcase not include multiple tablePath like counterPath + if (len(tp) > 1) || (len(test.wantPath) > 1) { + t.Errorf("GetDbPath tablePath length must be smaller than one") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } else { + if tp != nil && test.wantPath != nil { + if tp[0] != test.wantPath[0] { + t.Errorf("GetDbPath got tablePath != wantPath") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } + } else { + if !(tp == nil && test.wantPath == nil) { + t.Errorf("GetDbPath got tablePath != wantPath") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } + } + } + }) + } +} + +func TestGetCfgPath(t *testing.T) { + //flag.Set("alsologtostderr", "true") + //flag.Set("v", "6") + //flag.Parse() + UseRedisLocalTcpPort = true + useRedisTcpClient() + rclient := redis.NewClient(&redis.Options{ + Network: "tcp", + Addr: "localhost:6379", + Password: "", // no password set + DB: 4, + DialTimeout: 0, + }) + _, err := rclient.Ping().Result() + if err != nil { + t.Fatalf("failed to connect to redis server %v", err) + } + defer rclient.Close() + + var tests = []struct { + desc string + input []string + wantErr string + wantPath []tablePath + }{ + { + desc: "Invalid DB", + input: []string{"TEST_DB", "COUNTERS"}, + wantErr: "invaild db target: TEST_DB", + wantPath: nil, + }, + { + desc: "COUNTERS_DB not supported", + input: []string{"COUNTERS_DB", "COUNTERS"}, + wantErr: "config COUNTERS_DB not supported", + wantPath: nil, + }, + { + desc: "CONFIG_DB PORT Ethernet1 not 
supported", + input: []string{"CONFIG_DB", "PORT", "Ethernet1"}, + wantErr: "config [CONFIG_DB PORT Ethernet1] not supported", + wantPath: nil, + }, + { + desc: "CONFIG_DB TELEMETRY_CLIENT Global", + input: []string{"CONFIG_DB", "TELEMETRY_CLIENT", "Global"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "TELEMETRY_CLIENT", + tableKey: "Global", + delimitor: "|", + }, + }, + }, + { + //allow set invalid key for xnet subscribe first time + desc: "CONFIG_DB TELEMETRY_CLIENT invalid key", + input: []string{"CONFIG_DB", "TELEMETRY_CLIENT", "port"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "TELEMETRY_CLIENT", + tableKey: "port", + delimitor: "|", + }, + }, + }, + { + desc: "CONFIG_DB TELEMETRY_CLIENT Global retry_interval", + input: []string{"CONFIG_DB", "TELEMETRY_CLIENT", "Global", "retry_interval"}, + wantErr: "", + wantPath: []tablePath{ + tablePath{ + dbName: "CONFIG_DB", + tableName: "TELEMETRY_CLIENT", + tableKey: "Global", + delimitor: "|", + fields: "retry_interval", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + rclient.FlushDb() + rclient.HSet("TELEMETRY_CLIENT|Global", "retry_interval", "30") + + gsPath := &GSPath{gpath: test.input} + err := gsPath.GetCfgPath() + tp := gsPath.tpath + + if err != nil && test.wantErr != "" { + if err.Error() != test.wantErr { + t.Errorf("GetDbPath err (%v) != wantErr (%v)", err, test.wantErr) + } + } else { + if !(err == nil && test.wantErr == "") { + t.Errorf("GetDbPath err (%v) != wantErr (%v)", err, test.wantErr) + } + } + // This testcase not include multiple tablePath like counterPath + if (len(tp) > 1) || (len(test.wantPath) > 1) { + t.Errorf("GetDbPath tablePath length must be smaller than one") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } else { + if tp != nil && test.wantPath != nil { + if tp[0] != test.wantPath[0] { + t.Errorf("GetDbPath got tablePath != 
wantPath") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } + } else { + if !(tp == nil && test.wantPath == nil) { + t.Errorf("GetDbPath got tablePath != wantPath") + t.Logf("got : %v", tp) + t.Logf("want : %v", test.wantPath) + } + } + } + }) + } + +} diff --git a/sonic_data_client/system.go b/sonic_data_client/system.go new file mode 100644 index 000000000..dabdb66a2 --- /dev/null +++ b/sonic_data_client/system.go @@ -0,0 +1,417 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os/exec" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-yaml/yaml" + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/mem" + + linuxproc "github.com/c9s/goprocinfo/linux" + log "github.com/golang/glog" +) + +type statsRing struct { + writeIdx uint64 // slot index to write next + buff []*linuxproc.Stat + mu sync.RWMutex // Mutex for data protection +} + +type cpuStat struct { + CpuUsageAll cpuUtil `json:"cpu_all"` + CpuUsage []cpuUtil `json:"cpus"` +} + +// Cpu utilization rate +type cpuUtil struct { + Id string `json:"id"` + CpuUtil_100ms uint64 `json:"100ms"` + CpuUtil_1s uint64 `json:"1s"` + CpuUtil_5s uint64 `json:"5s"` + CpuUtil_1min uint64 `json:"1min"` + CpuUtil_5min uint64 `json:"5min"` +} + +type RXTX struct { + InPut string + OutPut string +} + +var rxtxdata = make(map[string]RXTX) + +const statsRingCap uint64 = 3000 // capacity of statsRing. 
+var statsR statsRing + +func getCpuUtilPercents(cur, last *linuxproc.CPUStat) uint64 { + curTotal := (cur.User + cur.Nice + cur.System + cur.Idle + cur.IOWait + cur.IRQ + cur.SoftIRQ + cur.Steal + cur.Guest + cur.GuestNice) + lastTotal := (last.User + last.Nice + last.System + last.Idle + last.IOWait + last.IRQ + last.SoftIRQ + last.Steal + last.Guest + last.GuestNice) + idleTicks := cur.Idle - last.Idle + totalTicks := curTotal - lastTotal + return 100 * (totalTicks - idleTicks) / totalTicks +} + +func getCpuUtilStat() *cpuStat { + + stat := cpuStat{} + statsR.mu.RLock() + defer statsR.mu.RUnlock() + + current := (statsR.writeIdx + statsRingCap - 1) % statsRingCap + // Get cpu utilization rate within last 100ms + last := (statsR.writeIdx + statsRingCap - 2) % statsRingCap + if statsR.buff[last] == nil { + return &stat + } + + curCpuStat := statsR.buff[current].CPUStatAll + lastCpuStat := statsR.buff[last].CPUStatAll + + CpuUtil_100ms := getCpuUtilPercents(&curCpuStat, &lastCpuStat) + stat.CpuUsageAll.Id = curCpuStat.Id + stat.CpuUsageAll.CpuUtil_100ms = CpuUtil_100ms + for i, cStat := range statsR.buff[last].CPUStats { + CpuUtil_100ms = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) + stat.CpuUsage = append(stat.CpuUsage, cpuUtil{Id: cStat.Id, CpuUtil_100ms: CpuUtil_100ms}) + } + + // Get cpu utilization rate within last 1s (10*100ms) + last = (statsR.writeIdx + statsRingCap - 10) % statsRingCap + if statsR.buff[last] == nil { + return &stat + } + lastCpuStat = statsR.buff[last].CPUStatAll + CpuUtil_1s := getCpuUtilPercents(&curCpuStat, &lastCpuStat) + stat.CpuUsageAll.CpuUtil_1s = CpuUtil_1s + for i, cStat := range statsR.buff[last].CPUStats { + CpuUtil_1s = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) + stat.CpuUsage[i].CpuUtil_1s = CpuUtil_1s + } + + // Get cpu utilization rate within last 5s (50*100ms) + last = (statsR.writeIdx + statsRingCap - 50) % statsRingCap + if statsR.buff[last] == nil { + return &stat + } + lastCpuStat 
= statsR.buff[last].CPUStatAll + CpuUtil_5s := getCpuUtilPercents(&curCpuStat, &lastCpuStat) + stat.CpuUsageAll.CpuUtil_5s = CpuUtil_5s + for i, cStat := range statsR.buff[last].CPUStats { + CpuUtil_5s = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) + stat.CpuUsage[i].CpuUtil_5s = CpuUtil_5s + } + + // Get cpu utilization rate within last 1m (600*100ms) + last = (statsR.writeIdx + statsRingCap - 600) % statsRingCap + if statsR.buff[last] == nil { + return &stat + } + lastCpuStat = statsR.buff[last].CPUStatAll + CpuUtil_1min := getCpuUtilPercents(&curCpuStat, &lastCpuStat) + stat.CpuUsageAll.CpuUtil_1min = CpuUtil_1min + for i, cStat := range statsR.buff[last].CPUStats { + CpuUtil_1min = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) + stat.CpuUsage[i].CpuUtil_1min = CpuUtil_1min + } + + // Get cpu utilization rate within last 5m (5*600*100ms) + last = (statsR.writeIdx + statsRingCap - 30000) % statsRingCap + if statsR.buff[last] == nil { + return &stat + } + lastCpuStat = statsR.buff[last].CPUStatAll + CpuUtil_5min := getCpuUtilPercents(&curCpuStat, &lastCpuStat) + stat.CpuUsageAll.CpuUtil_5min = CpuUtil_5min + for i, cStat := range statsR.buff[last].CPUStats { + CpuUtil_5min = getCpuUtilPercents(&statsR.buff[current].CPUStats[i], &cStat) + stat.CpuUsage[i].CpuUtil_5min = CpuUtil_5min + } + return &stat +} + +func GetCpuUtil() ([]byte, error) { + cpuStat := getCpuUtilStat() + log.V(4).Infof("getCpuUtil, cpuStat %v", cpuStat) + return marshal(cpuStat) +} + +func pollStats() { + for { + stat, err := linuxproc.ReadStat("/proc/stat") + if err != nil { + log.V(2).Infof("stat read fail") + continue + } + + statsR.mu.Lock() + + statsR.buff[statsR.writeIdx] = stat + statsR.writeIdx++ + statsR.writeIdx %= statsRingCap + statsR.mu.Unlock() + time.Sleep(time.Millisecond * 100) + } +} + +func getcountersdb(key string, filed string) (string, error) { + redisDb := Target2RedisDb["COUNTERS_DB"] + val, err := redisDb.HGet(key, filed).Result() + if 
err != nil { + log.V(2).Infof("redis HGet failed for %v %v", key, filed) + return val, err + } + return val, err +} +func getrate(oldstr string, newstr string, delta int) (rate string) { + old, _ := strconv.Atoi(oldstr) + new, _ := strconv.Atoi(newstr) + rateint := float32((new - old) / delta) + if rateint > 1024*1024*10 { + rate = fmt.Sprintf("%.2f MB/s", rateint/1024/1024) + } else if rateint > 1024*10 { + rate = fmt.Sprintf("%.2f KB/s", rateint/1024) + } else { + rate = fmt.Sprintf("%.2f B/s", rateint) + } + return rate +} + +func GetRxTxRateTimer() { + interval := 10 + oldcounters := make(map[string]RXTX) + newcounters := make(map[string]RXTX) + countersNameMap := make(map[string]string) + for { + time.Sleep(time.Duration(interval) * time.Second) + countersNameMaptemp, err := getCountersMap("COUNTERS_PORT_NAME_MAP") + if (err != nil) || (len(countersNameMaptemp) == 0) { + continue + } + countersNameMap = countersNameMaptemp + break + } + for { + for port, oid := range countersNameMap { + inputoctet, _ := getcountersdb("COUNTERS:"+oid, "SAI_PORT_STAT_IF_IN_OCTETS") + outputoctet, _ := getcountersdb("COUNTERS:"+oid, "SAI_PORT_STAT_IF_OUT_OCTETS") + oldcounters[port] = RXTX{inputoctet, outputoctet} + } + time.Sleep(time.Duration(interval) * time.Second) + for port, oid := range countersNameMap { + inputoctet, _ := getcountersdb("COUNTERS:"+oid, "SAI_PORT_STAT_IF_IN_OCTETS") + outputoctet, _ := getcountersdb("COUNTERS:"+oid, "SAI_PORT_STAT_IF_OUT_OCTETS") + newcounters[port] = RXTX{inputoctet, outputoctet} + inputrate := getrate(oldcounters[port].InPut, newcounters[port].InPut, interval) + outputrate := getrate(oldcounters[port].OutPut, newcounters[port].OutPut, interval) + rxtxdata[port] = RXTX{inputrate, outputrate} + } + } +} + +func GetRxTxRate() ([]byte, error) { + return marshal(rxtxdata) +} + +func GetConfigdb() ([]byte, error) { + redisDb := Target2RedisDb["CONFIG_DB"] + dbkeys, err := redisDb.Keys("*").Result() + data1 := 
make(map[string]map[string]map[string]interface{}) + if err != nil { + log.V(2).Infof("redis Keys failed with err %v", err) + return nil, err + } + separator, _ := GetTableKeySeparator("CONFIG_DB") + for _, dbkey := range dbkeys { + data2 := make(map[string]map[string]interface{}) + log.V(6).Infof("\r\n dbkey= %v \r\n", dbkey) + dbkeystr := strings.Split(dbkey, separator) + if len(dbkeystr) <= 1 { + log.V(2).Infof("not get key %v value", dbkeystr) + continue + } + table := dbkeystr[0] + fild := strings.Join(dbkeystr[1:], separator) + fv, err := redisDb.HGetAll(dbkey).Result() + if err != nil { + log.V(2).Infof("redis HGetAll failed dbkey %s", dbkey) + return nil, err + } + data3 := make(map[string]interface{}) + for f, v := range fv { + if strings.HasSuffix(f, "@") { + f = strings.Replace(f, "@", "", -1) + vlist := strings.Split(v, ",") + data3[f] = vlist + } else { + data3[f] = v + } + } + data2[fild] = data3 + if _, exist := data1[table]; exist { + data1[table][fild] = data3 + } else { + data1[table] = data2 + } + } + log.V(6).Infof("\r\n data1= %v \r\n", data1) + return marshal(data1) +} + +func marshal(data interface{}) ([]byte, error) { + j, err := json.Marshal(data) + if err != nil { + log.V(2).Infof("json marshal error %v", err) + return nil, err + } + log.V(6).Infof("marshal json:\n %v", string(j)) + return j, nil +} + +func GetMemInfo() ([]byte, error) { + vmStat, err := mem.VirtualMemory() + if err != nil { + log.V(2).Infof("get memory stat error %v", err) + return nil, err + } + data := map[string]string{ + "UsedPercent": strconv.FormatFloat(vmStat.UsedPercent, 'f', 2, 64), + } + return marshal(data) +} + +func GetDiskUsage() ([]byte, error) { + diskStat, err := disk.Usage("/") + if err != nil { + log.V(2).Infof("get disk stat error %v", err) + return nil, err + } + data := map[string]string{ + "UsedPercent": strconv.FormatFloat(diskStat.UsedPercent, 'f', 2, 64), + } + return marshal(data) +} + +// Get version and build date from sonic_version.yml +func 
GetVersion() ([]byte, error) { + ver, err := ioutil.ReadFile("/etc/sonic/sonic_version.yml") + if err != nil { + log.V(2).Infof("read version file error %v", err) + return nil, err + } + + m := make(map[string]string) + err = yaml.Unmarshal([]byte(ver), &m) + if err != nil { + log.V(2).Infof("unmarshal version yaml error %v", err) + return nil, err + } + + data := map[string]string{} + if val, ok := m["build_version"]; ok { + data["Software Version"] = val + } + if val, ok := m["build_date"]; ok { + data["Build Date"] = val + } + return marshal(data) +} + +// Get linux command output +func getCommandOut(cmd string) (string, error) { + c := exec.Command("bash", "-c", cmd) + var stdout, stderr bytes.Buffer + c.Stdout = &stdout + c.Stderr = &stderr + err := c.Run() + outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + if errStr != "" { + log.V(2).Infof("exec command %v err: %v", cmd, errStr) + return "", err + } + return outStr, nil +} + +// Get ntp stat by linux command. +// Command output have two type: +// (1) +// unsynchronised +// time server re-starting +// polling server every 8 s +// (2) +// synchronised to NTP server (10.65.254.222) at stratum 4 +// time correct to within 880 ms +// polling server every 64 s +// Return json format: +// { +// "stat":"unsynchronised" +// } +func GetNtpStat() ([]byte, error) { + // If ntpstat is unsynchronised, command exist status is not 0. + outStr, err := getCommandOut("ntpstat || true") + if err != nil { + return nil, err + } + + data := map[string]string{} + if strings.HasPrefix(outStr, "unsynchronised") { + data["stat"] = "unsynchronised" + } else if strings.HasPrefix(outStr, "synchronised") { + data["stat"] = "synchronised" + } else { + return nil, fmt.Errorf("invalid result: %v", outStr) + } + return marshal(data) +} + +// Get last down reason. +// Get the two most recent shutdowns or reboots by "last" command. +// Reboot denotes the system booting up; whereas, shutdown denotes the system going down. 
+// So a graceful shutdown would show up as reboot preceded by shutdown. +// In contrast, an ungraceful shutdown can be inferred by the omission of shutdown. +func GetDownReason() ([]byte, error) { + outStr, err := getCommandOut("last -n2 -x shutdown reboot") + if err != nil { + return nil, err + } + + log.V(6).Infof("out: %v", outStr) + lines := strings.Split(outStr, "\n") + rebootCnt := 0 + shutdownCnt := 0 + shutdownLine := "" + for _, line := range lines { + if strings.HasPrefix(line, "reboot ") { + rebootCnt += 1 + } else if strings.HasPrefix(line, "shutdown ") { + shutdownCnt += 1 + shutdownLine = strings.TrimPrefix(line, "shutdown ") + } + } + + last := "Unknown" + date := "" + if shutdownCnt == 1 && rebootCnt == 1 { + d := strings.Split(shutdownLine, " ") + last = d[0] + date = d[2] + } + + data := map[string]string{ + "last": last, + "date": date, + } + return marshal(data) +} + +func init() { + statsR.buff = make([]*linuxproc.Stat, statsRingCap) + go pollStats() + go GetRxTxRateTimer() +} diff --git a/sonic_data_client/virtual_db.go b/sonic_data_client/virtual_db.go deleted file mode 100644 index 6a9e037dd..000000000 --- a/sonic_data_client/virtual_db.go +++ /dev/null @@ -1,221 +0,0 @@ -package client - -import ( - "fmt" - log "github.com/golang/glog" - "strings" -) - -// virtual db is to Handle -// <1> different set of redis db data aggreggation -// <2> or non default TARGET_DEFINED stream subscription - -// For virtual db path -const ( - DbIdx uint = iota // DB name is the first element (no. 0) in path slice. - TblIdx // Table name is the second element (no. 1) in path slice. - KeyIdx // Key name is the first element (no. 2) in path slice. - FieldIdx // Field name is the first element (no. 3) in path slice. 
-) - -type v2rTranslate func([]string) ([]tablePath, error) - -type pathTransFunc struct { - path []string - transFunc v2rTranslate -} - -var ( - v2rTrie *Trie - - // Port name to oid map in COUNTERS table of COUNTERS_DB - countersPortNameMap = make(map[string]string) - - // Queue name to oid map in COUNTERS table of COUNTERS_DB - countersQueueNameMap = make(map[string]string) - - // path2TFuncTbl is used to populate trie tree which is reponsible - // for virtual path to real data path translation - pathTransFuncTbl = []pathTransFunc{ - { // stats for one or all Ethernet ports - path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*"}, - transFunc: v2rTranslate(v2rEthPortStats), - }, { // specific field stats for one or all Ethernet ports - path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "*"}, - transFunc: v2rTranslate(v2rEthPortFieldStats), - }, { // Queue stats for one or all Ethernet ports - path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues"}, - transFunc: v2rTranslate(v2rEthPortQueStats), - }, - } -) - -func (t *Trie) v2rTriePopulate() { - for _, pt := range pathTransFuncTbl { - n := t.Add(pt.path, pt.transFunc) - if n.meta.(v2rTranslate) == nil { - log.V(1).Infof("Failed to add trie node for %v with %v", pt.path, pt.transFunc) - } else { - log.V(2).Infof("Add trie node for %v with %v", pt.path, pt.transFunc) - } - - } -} - -func initCountersQueueNameMap() error { - var err error - if len(countersQueueNameMap) == 0 { - countersQueueNameMap, err = getCountersMap("COUNTERS_QUEUE_NAME_MAP") - if err != nil { - return err - } - } - return nil -} - -func initCountersPortNameMap() error { - var err error - if len(countersPortNameMap) == 0 { - countersPortNameMap, err = getCountersMap("COUNTERS_PORT_NAME_MAP") - if err != nil { - return err - } - } - return nil -} - -// Get the mapping between objects in counters DB, Ex. port name to oid in "COUNTERS_PORT_NAME_MAP" table. 
-// Aussuming static port name to oid map in COUNTERS table -func getCountersMap(tableName string) (map[string]string, error) { - redisDb, _ := Target2RedisDb["COUNTERS_DB"] - fv, err := redisDb.HGetAll(tableName).Result() - if err != nil { - log.V(2).Infof("redis HGetAll failed for COUNTERS_DB, tableName: %s", tableName) - return nil, err - } - log.V(6).Infof("tableName: %s, map %v", tableName, fv) - return fv, nil -} - -// Populate real data paths from paths like -// [COUNTER_DB COUNTERS Ethernet*] or [COUNTER_DB COUNTERS Ethernet68] -func v2rEthPortStats(paths []string) ([]tablePath, error) { - separator, _ := GetTableKeySeparator(paths[DbIdx]) - var tblPaths []tablePath - if strings.HasSuffix(paths[KeyIdx], "*") { // All Ethernet ports - for port, oid := range countersPortNameMap { - tblPath := tablePath{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - delimitor: separator, - jsonTableKey: port, - } - tblPaths = append(tblPaths, tblPath) - } - } else { //single port - oid, ok := countersPortNameMap[paths[KeyIdx]] - if !ok { - return nil, fmt.Errorf(" %v not a valid port ", paths[KeyIdx]) - } - tblPaths = []tablePath{{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - delimitor: separator, - }} - } - log.V(6).Infof("v2rEthPortStats: %v", tblPaths) - return tblPaths, nil -} - -// Supported cases: -// <1> port name having suffix of "*" with specific field; -// Ex. [COUNTER_DB COUNTERS Ethernet* SAI_PORT_STAT_PFC_0_RX_PKTS] -// <2> exact port name with specific field. -// Ex. 
[COUNTER_DB COUNTERS Ethernet68 SAI_PORT_STAT_PFC_0_RX_PKTS] -// case of "*" field could be covered in v2rEthPortStats() -func v2rEthPortFieldStats(paths []string) ([]tablePath, error) { - separator, _ := GetTableKeySeparator(paths[DbIdx]) - var tblPaths []tablePath - if strings.HasSuffix(paths[KeyIdx], "*") { - for port, oid := range countersPortNameMap { - tblPath := tablePath{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - field: paths[FieldIdx], - delimitor: separator, - jsonTableKey: port, - jsonField: paths[FieldIdx], - } - tblPaths = append(tblPaths, tblPath) - } - } else { //single port - oid, ok := countersPortNameMap[paths[KeyIdx]] - if !ok { - return nil, fmt.Errorf(" %v not a valid port ", paths[KeyIdx]) - } - tblPaths = []tablePath{{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - field: paths[FieldIdx], - delimitor: separator, - }} - } - log.V(6).Infof("v2rEthPortFieldStats: %+v", tblPaths) - return tblPaths, nil -} - -// Populate real data paths from paths like -// [COUNTER_DB COUNTERS Ethernet* Queues] or [COUNTER_DB COUNTERS Ethernet68 Queues] -func v2rEthPortQueStats(paths []string) ([]tablePath, error) { - separator, _ := GetTableKeySeparator(paths[DbIdx]) - var tblPaths []tablePath - if strings.HasSuffix(paths[KeyIdx], "*") { // queues on all Ethernet ports - for que, oid := range countersQueueNameMap { - tblPath := tablePath{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - delimitor: separator, - jsonTableKey: que, - } - tblPaths = append(tblPaths, tblPath) - } - } else { //queues on single port - portName := paths[KeyIdx] - for que, oid := range countersQueueNameMap { - //que is in formate of "Ethernet64:12" - names := strings.Split(que, separator) - if portName != names[0] { - continue - } - tblPath := tablePath{ - dbName: paths[DbIdx], - tableName: paths[TblIdx], - tableKey: oid, - delimitor: separator, - jsonTableKey: que, - } - tblPaths = append(tblPaths, tblPath) - } - 
} - log.V(6).Infof("v2rEthPortQueStats: %v", tblPaths) - return tblPaths, nil -} - -func lookupV2R(paths []string) ([]tablePath, error) { - n, ok := v2rTrie.Find(paths) - if ok { - v2rTrans := n.meta.(v2rTranslate) - return v2rTrans(paths) - } - return nil, fmt.Errorf("%v not found in virtual path tree", paths) -} - -func init() { - v2rTrie = NewTrie() - v2rTrie.v2rTriePopulate() -} diff --git a/sonic_data_client/vpath.go b/sonic_data_client/vpath.go new file mode 100644 index 000000000..cfaee8de8 --- /dev/null +++ b/sonic_data_client/vpath.go @@ -0,0 +1,318 @@ +package client + +import ( + "fmt" + log "github.com/golang/glog" + "strings" +) + +// virtual db is used to handle +// <1> different set of redis db data aggregation +// <2> or non default TARGET_DEFINED stream subscription + +// For virtual db path +const ( + DbIdx uint = iota // DB name is the first element (no. 0) in path slice. + TblIdx // Table name is the second element (no. 1) in path slice. + KeyIdx // Key name is the third element (no. 2) in path slice. + FieldIdx // Field name is the fourth element (no. 3) in path slice. 
+) + +type v2rTranslate func([]string) ([]tablePath, error) + +type pathTransFunc struct { + path []string + transFunc v2rTranslate +} + +var ( + v2rTrie *Trie + + supportedCounterFields = map[string][]string{} + + countersNameOidTbls = map[string]map[string]string{ + // Port name to oid map in COUNTERS table of COUNTERS_DB + "COUNTERS_PORT_NAME_MAP": make(map[string]string), + // Queue name to oid map in COUNTERS table of COUNTERS_DB + "COUNTERS_QUEUE_NAME_MAP": make(map[string]string), + // PG name to oid map in COUNTERS table of COUNTERS_DB + "COUNTERS_INGRESS_PRIORITY_GROUP_NAME_MAP": make(map[string]string), + // Buffer pool name to oid map in COUNTERS table of COUNTERS_DB + "COUNTERS_BUFFER_POOL_NAME_MAP": make(map[string]string), + // CPU Trap Group to oid map in COUNTERS table of COUNTERS_DB + "COUNTERS_TRAPGROUP_POLICER_MAP": make(map[string]string), + } + + // pathTransFuncTbl is used to populate the trie tree which is responsible + // for virtual path to real data path translation + pathTransFuncTbl = []pathTransFunc{ + { // stats for one or all Ethernet ports + path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*"}, + transFunc: v2rTranslate(v2rEthPortStats), + }, { // specific field stats for one or all Ethernet ports + path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "*"}, + transFunc: v2rTranslate(v2rEthPortStats), + }, { // queue stats for one or all Ethernet ports + path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues"}, + transFunc: v2rTranslate(v2rEthPortQueStats), + }, { // specific queue stats for one or all Ethernet ports + path: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues", "*"}, + transFunc: v2rTranslate(v2rEthPortQueStats), + }, + } +) + +func (t *Trie) v2rTriePopulate() { + for _, pt := range pathTransFuncTbl { + n := t.Add(pt.path, pt.transFunc) + if n.meta.(v2rTranslate) == nil { + log.V(1).Infof("Failed to add trie node for %v with %v", pt.path, pt.transFunc) + } else { + log.V(2).Infof("Add trie node for %v with 
%v", pt.path, pt.transFunc) + } + + } +} + +func initCountersNameMap() error { + var err error + for name, tbl := range countersNameOidTbls { + if len(tbl) == 0 { + countersNameOidTbls[name], err = getCountersMap(name) + if err != nil { + return err + } + supportedCounterFields[name], err = getSupportedFields(name) + if err != nil { + return err + } + } + } + return nil +} + +// Get the mapping between objects in counters DB, Ex. port name to oid in "COUNTERS_PORT_NAME_MAP" table. +// Assuming static port name to oid map in COUNTERS table +func getCountersMap(tableName string) (map[string]string, error) { + redisDb, _ := Target2RedisDb["COUNTERS_DB"] + fv, err := redisDb.HGetAll(tableName).Result() + if err != nil { + log.V(2).Infof("redis HGetAll failed for COUNTERS_DB, tableName: %s", tableName) + return nil, err + } + log.V(6).Infof("tableName: %s, map %v", tableName, fv) + return fv, nil +} + +// Get supported fields for counters +func getSupportedFields(tableName string) ([]string, error) { + redisDb, _ := Target2RedisDb["COUNTERS_DB"] + for _, oid := range countersNameOidTbls[tableName] { + statKey := "COUNTERS:" + oid + keys, err := redisDb.HKeys(statKey).Result() + if err != nil { + log.V(2).Infof("redis HKeys failed for COUNTERS_DB, tableName: %s", tableName) + return nil, err + } + if len(keys) <= 0 { + log.V(2).Infof("supported fields empty, tableName: %s", tableName) + } + log.V(6).Infof("supported fields: %v", keys) + return keys, nil + } + return nil, nil +} + +// Get match fields from supported fields +func getMatchFields(field string, name string) (string, error) { + // the field is prefixed with "SAI" + if !strings.HasPrefix(field, "SAI") { + return "", nil + } + // don't lookup field without wildcard + if !strings.HasSuffix(field, "*") { + return field, nil + } + + var fs string + matchFields := []string{} + supportedFields := supportedCounterFields[name] + + if supportedFields != nil { + fieldPrefix := strings.TrimSuffix(field, "*") + for _, v := 
range supportedFields { + if strings.HasPrefix(v, fieldPrefix) { + matchFields = append(matchFields, v) + } + } + + if len(matchFields) <= 0 { + return "", fmt.Errorf("%v has no match fields", field) + } + // multiple fields are separated with "," + for _, f := range matchFields { + if fs != "" { + fs = fs + "," + f + } else { + fs = f + } + } + } + + return fs, nil +} + +// Supported cases: +// <1> port name paths without field +// Ex. [COUNTER_DB COUNTERS Ethernet*] +// [COUNTER_DB COUNTERS Ethernet68] +// <2> port name having suffix of "*" with specific field; +// Ex. [COUNTER_DB COUNTERS Ethernet* SAI_PORT_STAT_PFC_0_RX_PKTS] +// [COUNTER_DB COUNTERS Ethernet* SAI_PORT_STAT_PFC_*] +// <3> exact port name with specific field. +// Ex. [COUNTER_DB COUNTERS Ethernet68 SAI_PORT_STAT_PFC_0_RX_PKTS] +// [COUNTER_DB COUNTERS Ethernet68 SAI_PORT_STAT_PFC_*] +func v2rEthPortStats(paths []string) ([]tablePath, error) { + separator, _ := GetTableKeySeparator(paths[DbIdx]) + countersNameMap := countersNameOidTbls["COUNTERS_PORT_NAME_MAP"] + var tblPaths []tablePath + var fields string + + fields, err := getMatchFields(paths[len(paths)-1], "COUNTERS_PORT_NAME_MAP") + if err != nil { + return nil, err + } + + if strings.HasSuffix(paths[KeyIdx], "*") { //all ports + for port, oid := range countersNameMap { + tblPath := tablePath{ + dbName: paths[DbIdx], + tableName: paths[TblIdx], + tableKey: oid, + fields: fields, + delimitor: separator, + jsonTableKey: port, + jsonFields: fields, + } + tblPaths = append(tblPaths, tblPath) + } + } else { //single port + oid, ok := countersNameMap[paths[KeyIdx]] + if !ok { + return nil, fmt.Errorf(" %v not a valid port ", paths[KeyIdx]) + } + + // When fields is a single field, jsonFields isn't filled up + jf := "" + if strings.Contains(fields, ",") { + jf = fields + } + tblPaths = []tablePath{{ + dbName: paths[DbIdx], + tableName: paths[TblIdx], + tableKey: oid, + fields: fields, + delimitor: separator, + jsonFields: jf, + }} + } + 
log.V(6).Infof("v2rEthPortStats: tblPaths %+v", tblPaths) + return tblPaths, nil +} + +func v2rCpuStats(paths []string) ([]tablePath, error) { + separator, _ := GetTableKeySeparator(paths[DbIdx]) + countersNameMap := countersNameOidTbls["COUNTERS_TRAPGROUP_POLICER_MAP"] + var tblPaths []tablePath + var fields string + + fields, err := getMatchFields(paths[len(paths)-1], "COUNTERS_TRAPGROUP_POLICER_MAP") + + if err != nil { + return nil, err + } + + for trapname, oid := range countersNameMap { + tblPath := tablePath{ + dbName: paths[DbIdx], + tableName: paths[TblIdx], + tableKey: oid, + fields: fields, + delimitor: separator, + jsonTableKey: trapname, + jsonFields: fields, + } + tblPaths = append(tblPaths, tblPath) + } + + log.V(6).Infof("v2rCpuStats: tblPaths %+v", tblPaths) + return tblPaths, nil +} + +func v2rQStatsGeneric(paths []string, mapTblName string) ([]tablePath, error) { + separator, _ := GetTableKeySeparator(paths[DbIdx]) + countersNameMap := countersNameOidTbls[mapTblName] + var tblPaths []tablePath + var fields string + + fields, err := getMatchFields(paths[len(paths)-1], mapTblName) + if err != nil { + return nil, err + } + + if strings.HasSuffix(paths[KeyIdx], "*") { //all ports + for q, oid := range countersNameMap { + tblPath := tablePath{ + dbName: paths[DbIdx], + tableName: paths[TblIdx], + tableKey: oid, + fields: fields, + delimitor: separator, + jsonTableKey: q, + jsonFields: fields, + } + tblPaths = append(tblPaths, tblPath) + } + } else { //single port + portName := paths[KeyIdx] + for q, oid := range countersNameMap { + //que is in the format of "Ethernet64:12" + names := strings.Split(q, separator) + if portName != names[0] { + continue + } + + tblPath := tablePath{ + dbName: paths[DbIdx], + tableName: paths[TblIdx], + tableKey: oid, + fields: fields, + delimitor: separator, + jsonTableKey: q, + jsonFields: fields, + } + tblPaths = append(tblPaths, tblPath) + } + } + log.V(6).Infof("v2rGenericStats: mapTblName %s tblPaths %+v", mapTblName, 
tblPaths) + return tblPaths, nil +} + +func v2rEthPortQueStats(paths []string) ([]tablePath, error) { + tblPaths, err := v2rQStatsGeneric(paths, "COUNTERS_QUEUE_NAME_MAP") + return tblPaths, err +} + +func getv2rPath(paths []string) ([]tablePath, error) { + n, ok := v2rTrie.Find(paths) + if ok { + v2rTrans := n.meta.(v2rTranslate) + return v2rTrans(paths) + } + return nil, fmt.Errorf("%v not found in virtual path tree", paths) +} + +func init() { + v2rTrie = NewTrie() + v2rTrie.v2rTriePopulate() +} diff --git a/sonic_data_client/vpath_test.go b/sonic_data_client/vpath_test.go new file mode 100644 index 000000000..97b386368 --- /dev/null +++ b/sonic_data_client/vpath_test.go @@ -0,0 +1,420 @@ +package client + +import ( + "sort" + "testing" +) + +type tblPathSlice []tablePath + +func (a tblPathSlice) Len() int { + return len(a) +} + +func (a tblPathSlice) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a tblPathSlice) Less(i, j int) bool { + if a[i].dbName != a[j].dbName { + return a[i].dbName < a[j].dbName + } + if a[i].tableName != a[j].tableName { + return a[i].tableName < a[j].tableName + } + if a[i].tableKey != a[j].tableKey { + return a[i].tableKey < a[j].tableKey + } + if a[i].delimitor != a[j].delimitor { + return a[i].delimitor < a[j].delimitor + } + if a[i].fields != a[j].fields { + return a[i].fields < a[j].fields + } + if a[i].jsonTableKey != a[j].jsonTableKey { + return a[i].jsonTableKey < a[j].jsonTableKey + } + + return a[i].jsonFields < a[j].jsonFields +} + +func mock_initCountersNameMap() { + countersNameOidTbls["COUNTERS_PORT_NAME_MAP"] = map[string]string{ + "Ethernet1": "oid:0x1000000000001", + "Ethernet2": "oid:0x1000000000002", + } + + countersNameOidTbls["COUNTERS_QUEUE_NAME_MAP"] = map[string]string{ + "Ethernet1:0": "oid:0x15000000000010", + "Ethernet1:1": "oid:0x15000000000011", + "Ethernet2:0": "oid:0x15000000000020", + "Ethernet2:1": "oid:0x15000000000021", + } + + countersNameOidTbls["COUNTERS_BUFFER_POOL_NAME_MAP"] = 
map[string]string{ + "BUFFER_POOL_0": "oid:0x18000000000000", + "BUFFER_POOL_1": "oid:0x18000000000001", + } + + supportedCounterFields["COUNTERS_PORT_NAME_MAP"] = []string{ + "SAI_PORT_STAT_IF_IN_OCTETS", + "SAI_PORT_STAT_IF_IN_UCAST_PKTS", + "SAI_PORT_STAT_IF_OUT_OCTETS", + "SAI_PORT_STAT_IF_OUT_UCAST_PKTS", + } + + supportedCounterFields["COUNTERS_QUEUE_NAME_MAP"] = []string{ + "SAI_QUEUE_STAT_PACKETS", + "SAI_QUEUE_STAT_BYTES", + "SAI_QUEUE_STAT_DROPPED_PACKETS", + "SAI_QUEUE_STAT_DROPPED_BYTES", + } + + supportedCounterFields["COUNTERS_BUFFER_POOL_NAME_MAP"] = []string{ + "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_BYTES", + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES", + } +} + +func TestGetV2RPath(t *testing.T) { + mock_initCountersNameMap() + + var tests = []struct { + desc string + input []string + want []tablePath + }{ + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000001", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet1", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet2", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*/SAI_PORT_STAT_IF_IN_OCTETS", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "SAI_PORT_STAT_IF_IN_OCTETS"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000001", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS", + jsonTableKey: "Ethernet1", + jsonFields: "SAI_PORT_STAT_IF_IN_OCTETS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS", + jsonTableKey: "Ethernet2", + jsonFields: "SAI_PORT_STAT_IF_IN_OCTETS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*/SAI_PORT_STAT_IF_IN_*", + input: 
[]string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "SAI_PORT_STAT_IF_IN_*"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000001", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + jsonTableKey: "Ethernet1", + jsonFields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + jsonTableKey: "Ethernet2", + jsonFields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2/SAI_PORT_STAT_IF_IN_OCTETS", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2", "SAI_PORT_STAT_IF_IN_OCTETS"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2/SAI_PORT_STAT_IF_IN_*", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2", "SAI_PORT_STAT_IF_IN_*"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x1000000000002", + delimitor: ":", + fields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + jsonFields: "SAI_PORT_STAT_IF_IN_OCTETS,SAI_PORT_STAT_IF_IN_UCAST_PKTS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*/Queues", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000010", + delimitor: ":", + fields: "", + 
jsonTableKey: "Ethernet1:0", + jsonFields: "", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000011", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet1:1", + jsonFields: "", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet2:0", + jsonFields: "", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet2:1", + jsonFields: "", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*/Queues/SAI_QUEUE_STAT_DROPPED_PACKETS", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues", "SAI_QUEUE_STAT_DROPPED_PACKETS"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000010", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + jsonTableKey: "Ethernet1:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000011", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + jsonTableKey: "Ethernet1:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + jsonTableKey: "Ethernet2:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + jsonTableKey: "Ethernet2:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet*/Queues/SAI_QUEUE_STAT_DROPPED*", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet*", "Queues", "SAI_QUEUE_STAT_DROPPED*"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", 
+ tableKey: "oid:0x15000000000010", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet1:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000011", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet1:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet2:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet2:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2/Queues", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2", "Queues"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet2:0", + jsonFields: "", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "", + jsonTableKey: "Ethernet2:1", + jsonFields: "", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2/Queues/SAI_QUEUE_STAT_DROPPED_PACKETS", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2", "Queues", "SAI_QUEUE_STAT_DROPPED_PACKETS"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + 
jsonTableKey: "Ethernet2:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + jsonTableKey: "Ethernet2:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS", + }, + }, + }, + { + desc: "COUNTERS_DB/COUNTERS/Ethernet2/Queues/SAI_QUEUE_STAT_DROPPED*", + input: []string{"COUNTERS_DB", "COUNTERS", "Ethernet2", "Queues", "SAI_QUEUE_STAT_DROPPED*"}, + want: []tablePath{ + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000020", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet2:0", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + { + dbName: "COUNTERS_DB", + tableName: "COUNTERS", + tableKey: "oid:0x15000000000021", + delimitor: ":", + fields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + jsonTableKey: "Ethernet2:1", + jsonFields: "SAI_QUEUE_STAT_DROPPED_PACKETS,SAI_QUEUE_STAT_DROPPED_BYTES", + }, + }, + }, + } + + for _, test := range tests { + //got, err := getv2rPath(test.input) + t.Run(test.desc, func(t *testing.T) { + got, err := getv2rPath(test.input) + + if len(got) != len(test.want) { + t.Errorf("getv2rPath err: %v input: %q got != want", err, test.input) + t.Logf("got : %v", got) + t.Logf("want: %v", test.want) + } else { + sort.Sort(tblPathSlice(got)) + sort.Sort(tblPathSlice(test.want)) + for i, g := range got { + if g != test.want[i] { + t.Errorf("getv2rPath err: %v input: (%q) element[%v] isn't wanted %v", err, test.input, i, g) + t.Logf("got = %v", got) + t.Logf("want = %v", test.want) + } + } + } + }) + } + +} diff --git a/testdata/COUNTERS:Ethernet1.txt b/testdata/COUNTERS-Ethernet1.txt similarity index 100% rename from testdata/COUNTERS:Ethernet1.txt rename to testdata/COUNTERS-Ethernet1.txt diff --git a/testdata/COUNTERS:Ethernet68:Queues.txt 
b/testdata/COUNTERS-Ethernet68-Queues.txt similarity index 100% rename from testdata/COUNTERS:Ethernet68:Queues.txt rename to testdata/COUNTERS-Ethernet68-Queues.txt diff --git a/testdata/COUNTERS:Ethernet68.txt b/testdata/COUNTERS-Ethernet68.txt similarity index 100% rename from testdata/COUNTERS:Ethernet68.txt rename to testdata/COUNTERS-Ethernet68.txt diff --git a/testdata/COUNTERS:Ethernet_wildcard.txt b/testdata/COUNTERS-Ethernet_wildcard.txt similarity index 100% rename from testdata/COUNTERS:Ethernet_wildcard.txt rename to testdata/COUNTERS-Ethernet_wildcard.txt diff --git a/testdata/COUNTERS:Ethernet_wildcard_PFC_7_RX.txt b/testdata/COUNTERS-Ethernet_wildcard_PFC_7_RX.txt similarity index 100% rename from testdata/COUNTERS:Ethernet_wildcard_PFC_7_RX.txt rename to testdata/COUNTERS-Ethernet_wildcard_PFC_7_RX.txt diff --git a/testdata/COUNTERS:Ethernet_wildcard_Queues.txt b/testdata/COUNTERS-Ethernet_wildcard_Queues.txt similarity index 100% rename from testdata/COUNTERS:Ethernet_wildcard_Queues.txt rename to testdata/COUNTERS-Ethernet_wildcard_Queues.txt diff --git a/testdata/COUNTERS:oid:0x1500000000091c.txt b/testdata/COUNTERS-oid-0x1500000000091c.txt similarity index 100% rename from testdata/COUNTERS:oid:0x1500000000091c.txt rename to testdata/COUNTERS-oid-0x1500000000091c.txt diff --git a/testdata/COUNTERS:oid:0x1500000000092a.txt b/testdata/COUNTERS-oid-0x1500000000092a.txt similarity index 100% rename from testdata/COUNTERS:oid:0x1500000000092a.txt rename to testdata/COUNTERS-oid-0x1500000000092a.txt