diff --git a/Makefile b/Makefile index 2813f3f6..ac68f5a9 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,14 @@ TEST_FILES=$(wildcard *_test.go) TELEMETRY_TEST_DIR = build/tests/gnmi_server TELEMETRY_TEST_BIN = $(TELEMETRY_TEST_DIR)/server.test ifeq ($(ENABLE_TRANSLIB_WRITE),y) -BLD_FLAGS := -tags gnmi_translib_write +BLD_TAGS := gnmi_translib_write +endif +ifeq ($(ENABLE_NATIVE_WRITE),y) +BLD_TAGS := $(BLD_TAGS) gnmi_native_write +endif + +ifneq ($(BLD_TAGS),) +BLD_FLAGS := -tags "$(strip $(BLD_TAGS))" endif GO_DEPS := vendor/.done @@ -41,6 +48,8 @@ $(GO_DEPS): go.mod $(PATCHES) patch -d vendor -p0 < patches/gnmi_cli.all.patch patch -d vendor -p0 < patches/gnmi_set.patch patch -d vendor -p0 < patches/gnmi_get.patch + patch -d vendor -p0 < patches/gnmi_path.patch + patch -d vendor -p0 < patches/gnmi_xpath.patch git apply patches/0001-Updated-to-filter-and-write-to-file.patch touch $@ @@ -49,7 +58,7 @@ go-deps: $(GO_DEPS) go-deps-clean: $(RM) -r vendor -sonic-gnmi: $(GO_DEPS) +sonic-gnmi: $(GO_DEPS) libswss ifeq ($(CROSS_BUILD_ENVIRON),y) $(GO) build -o ${GOBIN}/telemetry -mod=vendor $(BLD_FLAGS) github.com/sonic-net/sonic-gnmi/telemetry $(GO) build -o ${GOBIN}/dialout_client_cli -mod=vendor $(BLD_FLAGS) github.com/sonic-net/sonic-gnmi/dialout/dialout_client_cli @@ -57,6 +66,7 @@ ifeq ($(CROSS_BUILD_ENVIRON),y) $(GO) build -o ${GOBIN}/gnmi_set -mod=vendor github.com/jipanyang/gnxi/gnmi_set $(GO) build -o ${GOBIN}/gnmi_cli -mod=vendor github.com/openconfig/gnmi/cmd/gnmi_cli $(GO) build -o ${GOBIN}/gnoi_client -mod=vendor github.com/sonic-net/sonic-gnmi/gnoi_client + $(GO) build -o ${GOBIN}/gnmi_dump -mod=vendor github.com/sonic-net/sonic-gnmi/gnmi_dump else $(GO) install -mod=vendor $(BLD_FLAGS) github.com/sonic-net/sonic-gnmi/telemetry $(GO) install -mod=vendor $(BLD_FLAGS) github.com/sonic-net/sonic-gnmi/dialout/dialout_client_cli @@ -64,16 +74,24 @@ else $(GO) install -mod=vendor github.com/jipanyang/gnxi/gnmi_set $(GO) install -mod=vendor github.com/openconfig/gnmi/cmd/gnmi_cli $(GO) install -mod=vendor github.com/sonic-net/sonic-gnmi/gnoi_client + $(GO) install -mod=vendor github.com/sonic-net/sonic-gnmi/gnmi_dump endif +# TODO: Create a new repo for this lib, sonic-restapi and sonic-gnmi can share this lib +libswss: + make -C libcswsscommon + sudo make -C libcswsscommon install + check_gotest: sudo mkdir -p ${DBDIR} sudo cp ./testdata/database_config.json ${DBDIR} sudo mkdir -p /usr/models/yang || true sudo find $(MGMT_COMMON_DIR)/models -name '*.yang' -exec cp {} /usr/models/yang/ \; sudo $(GO) test -coverprofile=coverage-config.txt -covermode=atomic -v github.com/sonic-net/sonic-gnmi/sonic_db_config - sudo $(GO) test -coverprofile=coverage-gnmi.txt -covermode=atomic -mod=vendor $(BLD_FLAGS) -v github.com/sonic-net/sonic-gnmi/gnmi_server + sudo $(GO) test -coverprofile=coverage-gnmi.txt -covermode=atomic -mod=vendor $(BLD_FLAGS) -v github.com/sonic-net/sonic-gnmi/gnmi_server -coverpkg ../... sudo $(GO) test -coverprofile=coverage-dialcout.txt -covermode=atomic -mod=vendor $(BLD_FLAGS) -v github.com/sonic-net/sonic-gnmi/dialout/dialout_client + sudo $(GO) test -coverprofile=coverage-data.txt -covermode=atomic -mod=vendor -v github.com/sonic-net/sonic-gnmi/sonic_data_client + sudo $(GO) test -coverprofile=coverage-dbus.txt -covermode=atomic -mod=vendor -v github.com/sonic-net/sonic-gnmi/sonic_service_client $(GO) get github.com/axw/gocov/... 
$(GO) get github.com/AlekSi/gocov-xml gocov convert coverage-*.txt | gocov-xml -source $(shell pwd) > coverage.xml @@ -99,6 +117,7 @@ install: $(INSTALL) -D $(BUILD_DIR)/gnmi_set $(DESTDIR)/usr/sbin/gnmi_set $(INSTALL) -D $(BUILD_DIR)/gnmi_cli $(DESTDIR)/usr/sbin/gnmi_cli $(INSTALL) -D $(BUILD_DIR)/gnoi_client $(DESTDIR)/usr/sbin/gnoi_client + $(INSTALL) -D $(BUILD_DIR)/gnmi_dump $(DESTDIR)/usr/sbin/gnmi_dump deinstall: @@ -107,5 +126,6 @@ deinstall: rm $(DESTDIR)/usr/sbin/gnmi_get rm $(DESTDIR)/usr/sbin/gnmi_set rm $(DESTDIR)/usr/sbin/gnoi_client + rm $(DESTDIR)/usr/sbin/gnmi_dump diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 39acf6fb..5506b3d6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -40,7 +40,7 @@ stages: DIFF_COVER_WORKING_DIRECTORY: $(System.DefaultWorkingDirectory)/sonic-gnmi container: - image: sonicdev-microsoft.azurecr.io:443/sonic-slave-buster:latest + image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:latest steps: - checkout: self @@ -79,6 +79,9 @@ stages: displayName: "Download sonic-mgmt-common" - script: | + # PYTEST + sudo pip3 install -U pytest + # REDIS sudo apt-get install -y redis-server sudo sed -ri 's/^# unixsocket/unixsocket/' /etc/redis/redis.conf @@ -87,47 +90,32 @@ stages: sudo service redis-server start # LIBYANG - sudo dpkg -i ../target/debs/buster/libyang*1.0.73*.deb + sudo dpkg -i ../target/debs/bullseye/libyang*1.0.73*.deb displayName: "Install dependency" - script: | # LIBSWSSCOMMON sudo apt-get -y purge libhiredis-dev libnl-3-dev libnl-route-3-dev - sudo dpkg -i ../target/debs/buster/libnl-3-200_*.deb - sudo dpkg -i ../target/debs/buster/libnl-genl-3-200_*.deb - sudo dpkg -i ../target/debs/buster/libnl-route-3-200_*.deb - sudo dpkg -i ../target/debs/buster/libnl-nf-3-200_*.deb - sudo dpkg -i ../target/debs/buster/libhiredis0.14_*.deb + sudo dpkg -i ../target/debs/bullseye/libnl-3-200_*.deb + sudo dpkg -i ../target/debs/bullseye/libnl-genl-3-200_*.deb + sudo dpkg -i ../target/debs/bullseye/libnl-route-3-200_*.deb + sudo dpkg -i ../target/debs/bullseye/libnl-nf-3-200_*.deb + sudo dpkg -i ../target/debs/bullseye/libhiredis0.14_*.deb + sudo dpkg -i ../target/debs/bullseye/libhiredis-dev_*.deb + sudo dpkg -i ../target/debs/bullseye/libswsscommon_1.0.0_amd64.deb + sudo dpkg -i ../target/debs/bullseye/libswsscommon-dev_1.0.0_amd64.deb + sudo dpkg -i ../target/debs/bullseye/python3-swsscommon_1.0.0_amd64.deb displayName: "Install libswsscommon dependencies" - script: | set -ex # Install .NET CORE curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - - sudo apt-add-repository https://packages.microsoft.com/debian/10/prod + sudo apt-add-repository https://packages.microsoft.com/debian/11/prod sudo apt-get update sudo apt-get install -y dotnet-sdk-5.0 displayName: "Install .NET CORE" - - task: DownloadPipelineArtifact@2 - inputs: - source: specific - project: build - pipeline: Azure.sonic-swss-common - artifact: sonic-swss-common - runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' - displayName: "Download sonic-swss-common" - - - script: | - set -ex - # LIBSWSSCOMMON - sudo dpkg -i libswsscommon_1.0.0_amd64.deb - sudo dpkg -i libswsscommon-dev_1.0.0_amd64.deb - sudo dpkg -i python3-swsscommon_1.0.0_amd64.deb - workingDirectory: $(Pipeline.Workspace)/ - displayName: 'Install libswsscommon package' - - script: | set -ex ls -l diff --git a/common_utils/constants.go b/common_utils/constants.go new file mode 100644 index 00000000..db38b0e6 --- /dev/null +++ b/common_utils/constants.go 
@@ -0,0 +1,4 @@
+
+package common_utils
+
+const GNMI_WORK_PATH = "/tmp"
diff --git a/common_utils/context.go b/common_utils/context.go
index b7c257ef..fc3fb651 100644
--- a/common_utils/context.go
+++ b/common_utils/context.go
@@ -36,6 +36,25 @@ const requestContextKey contextkey = 0
 // Request Id generator
 var requestCounter uint64
 
+var CountersName = [...]string {
+	"GNMI get",
+	"GNMI get fail",
+	"GNMI set",
+	"GNMI set fail",
+	"GNOI reboot",
+	"DBUS",
+	"DBUS fail",
+	"DBUS apply patch db",
+	"DBUS apply patch yang",
+	"DBUS create checkpoint",
+	"DBUS delete checkpoint",
+	"DBUS config save",
+	"DBUS config reload",
+}
+
+var globalCounters [len(CountersName)]uint64
+
+
 // GetContext function returns the RequestContext object for a
 // gRPC request. RequestContext is maintained as a context value of
 // the request. Creates a new RequestContext object is not already
@@ -55,8 +74,25 @@ func GetContext(ctx context.Context) (*RequestContext, context.Context) {
 
 func GetUsername(ctx context.Context, username *string) {
 	rc, _ := GetContext(ctx)
-	if rc != nil {
-		*username = rc.Auth.User
-	}
+	if rc != nil {
+		*username = rc.Auth.User
+	}
+}
+
+func InitCounters() {
+	for i := 0; i < len(CountersName); i++ {
+		globalCounters[i] = 0
+	}
+	SetMemCounters(&globalCounters)
+}
+
+func IncCounter(name string) {
+	for i := 0; i < len(CountersName); i++ {
+		if CountersName[i] == name {
+			atomic.AddUint64(&globalCounters[i], 1)
+			break
+		}
+	}
+	SetMemCounters(&globalCounters)
 }
diff --git a/common_utils/shareMem.go b/common_utils/shareMem.go
new file mode 100644
index 00000000..f002fbe7
--- /dev/null
+++ b/common_utils/shareMem.go
@@ -0,0 +1,64 @@
+package common_utils
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+// Use shared memory to expose the GNMI internal counters.
+// The GNMI server and gnmi_dump must use the same memKey to access the shared memory.
+// memSize is 1024 bytes, so up to 128 uint64 counters can be stored.
+// memMode is 0x380, i.e. O_RDWR|IPC_CREAT:
+// O_RDWR means the owner can read and write the segment, everyone else can't.
+// IPC_CREAT means a shared memory segment is created if none exists yet for memKey.
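+// (Added note: numerically, 0x380 is octal 01600, i.e. IPC_CREAT (01000) plus the
+// 0600 owner read/write permission bits.)
+// The counters are written as consecutive uint64 values from the start of the
+// segment, in the same order as CountersName.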
+var (
+	memKey = 7749
+	memSize = 1024
+	memMode = 0x380
+)
+
+func SetMemCounters(counters *[len(CountersName)]uint64) error {
+	shmid, _, err := syscall.Syscall(syscall.SYS_SHMGET, uintptr(memKey), uintptr(memSize), uintptr(memMode))
+	if int(shmid) == -1 {
+		return fmt.Errorf("syscall error, err: %v\n", err)
+	}
+
+	shmaddr, _, err := syscall.Syscall(syscall.SYS_SHMAT, shmid, 0, 0)
+	if int(shmaddr) == -1 {
+		return fmt.Errorf("syscall error, err: %v\n", err)
+	}
+	defer syscall.Syscall(syscall.SYS_SHMDT, shmaddr, 0, 0)
+
+	const size = unsafe.Sizeof(uint64(0))
+	addr := uintptr(unsafe.Pointer(shmaddr))
+
+	for i := 0; i < len(counters); i++ {
+		*(*uint64)(unsafe.Pointer(addr)) = counters[i]
+		addr += size
+	}
+	return nil
+}
+
+func GetMemCounters(counters *[len(CountersName)]uint64) error {
+	shmid, _, err := syscall.Syscall(syscall.SYS_SHMGET, uintptr(memKey), uintptr(memSize), uintptr(memMode))
+	if int(shmid) == -1 {
+		return fmt.Errorf("syscall error, err: %v\n", err)
+	}
+
+	shmaddr, _, err := syscall.Syscall(syscall.SYS_SHMAT, shmid, 0, 0)
+	if int(shmaddr) == -1 {
+		return fmt.Errorf("syscall error, err: %v\n", err)
+	}
+	defer syscall.Syscall(syscall.SYS_SHMDT, shmaddr, 0, 0)
+
+	const size = unsafe.Sizeof(uint64(0))
+	addr := uintptr(unsafe.Pointer(shmaddr))
+
+	for i := 0; i < len(counters); i++ {
+		counters[i] = *(*uint64)(unsafe.Pointer(addr))
+		addr += size
+	}
+	return nil
+}
+
diff --git a/gnmi_dump/gnmi_dump.go b/gnmi_dump/gnmi_dump.go
new file mode 100644
index 00000000..cda7dab7
--- /dev/null
+++ b/gnmi_dump/gnmi_dump.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"github.com/sonic-net/sonic-gnmi/common_utils"
+)
+
+const help = `
+gnmi_dump is used to dump internal counters for debugging purposes,
+including GNMI request counters, GNOI request counters and DBUS request counters.
+` + +func main() { + flag.Usage = func() { + fmt.Print(help) + } + flag.Parse() + var counters [len(common_utils.CountersName)]uint64 + err := common_utils.GetMemCounters(&counters) + if err != nil { + fmt.Printf("Error: Fail to read counters, %v", err) + return + } + fmt.Printf("Dump GNMI counters\n") + for i := 0; i < len(common_utils.CountersName); i++ { + fmt.Printf("%v---%v\n", common_utils.CountersName[i], counters[i]) + } +} diff --git a/gnmi_server/constants_native.go b/gnmi_server/constants_native.go new file mode 100644 index 00000000..450899c9 --- /dev/null +++ b/gnmi_server/constants_native.go @@ -0,0 +1,5 @@ +// +build !gnmi_native_write + +package gnmi + +const ENABLE_NATIVE_WRITE = false diff --git a/gnmi_server/constants_native_write.go b/gnmi_server/constants_native_write.go new file mode 100644 index 00000000..8bb98fae --- /dev/null +++ b/gnmi_server/constants_native_write.go @@ -0,0 +1,5 @@ +// +build gnmi_native_write + +package gnmi + +const ENABLE_NATIVE_WRITE = true diff --git a/gnmi_server/constants.go b/gnmi_server/constants_translib.go similarity index 94% rename from gnmi_server/constants.go rename to gnmi_server/constants_translib.go index af572add..d3ef6e75 100644 --- a/gnmi_server/constants.go +++ b/gnmi_server/constants_translib.go @@ -1,5 +1,5 @@ -// +build !gnmi_translib_write - -package gnmi - -const ENABLE_TRANSLIB_WRITE = false +// +build !gnmi_translib_write + +package gnmi + +const ENABLE_TRANSLIB_WRITE = false diff --git a/gnmi_server/constants_readwrite.go b/gnmi_server/constants_translib_write.go similarity index 94% rename from gnmi_server/constants_readwrite.go rename to gnmi_server/constants_translib_write.go index 7111fc38..662a9930 100644 --- a/gnmi_server/constants_readwrite.go +++ b/gnmi_server/constants_translib_write.go @@ -1,5 +1,5 @@ -// +build gnmi_translib_write - -package gnmi - -const ENABLE_TRANSLIB_WRITE = true +// +build gnmi_translib_write + +package gnmi + +const ENABLE_TRANSLIB_WRITE = true diff --git a/gnmi_server/gnoi.go b/gnmi_server/gnoi.go index 236a663d..8bd96536 100644 --- a/gnmi_server/gnoi.go +++ b/gnmi_server/gnoi.go @@ -2,12 +2,17 @@ package gnmi import ( "context" + "errors" + "os" gnoi_system_pb "github.com/openconfig/gnoi/system" log "github.com/golang/glog" "time" spb "github.com/sonic-net/sonic-gnmi/proto/gnoi" transutil "github.com/sonic-net/sonic-gnmi/transl_utils" + io "io/ioutil" + ssc "github.com/sonic-net/sonic-gnmi/sonic_service_client" spb_jwt "github.com/sonic-net/sonic-gnmi/proto/gnoi/jwt" + "github.com/sonic-net/sonic-gnmi/common_utils" "google.golang.org/grpc/status" "google.golang.org/grpc/codes" "os/user" @@ -15,14 +20,42 @@ import ( jwt "github.com/dgrijalva/jwt-go" ) +func RebootSystem(fileName string) error { + log.V(2).Infof("Rebooting with %s...", fileName) + sc, err := ssc.NewDbusClient() + if err != nil { + return err + } + err = sc.ConfigReload(fileName) + return err +} + func (srv *Server) Reboot(ctx context.Context, req *gnoi_system_pb.RebootRequest) (*gnoi_system_pb.RebootResponse, error) { + fileName := common_utils.GNMI_WORK_PATH + "/config_db.json.tmp" + _, err := authenticate(srv.config.UserAuth, ctx) if err != nil { return nil, err } log.V(1).Info("gNOI: Reboot") - return nil, status.Errorf(codes.Unimplemented, "") + log.V(1).Info("Request:", req) + log.V(1).Info("Reboot system now, delay is ignored...") + // TODO: Support GNOI reboot delay + // Delay in nanoseconds before issuing reboot. 
+ // https://github.com/openconfig/gnoi/blob/master/system/system.proto#L102-L115 + config_db_json, err := io.ReadFile(fileName) + if errors.Is(err, os.ErrNotExist) { + fileName = "" + } + err = RebootSystem(string(config_db_json)) + if err != nil { + return nil, err + } + var resp gnoi_system_pb.RebootResponse + return &resp, nil } + +// TODO: Support GNOI RebootStatus func (srv *Server) RebootStatus(ctx context.Context, req *gnoi_system_pb.RebootStatusRequest) (*gnoi_system_pb.RebootStatusResponse, error) { _, err := authenticate(srv.config.UserAuth, ctx) if err != nil { @@ -31,6 +64,8 @@ func (srv *Server) RebootStatus(ctx context.Context, req *gnoi_system_pb.RebootS log.V(1).Info("gNOI: RebootStatus") return nil, status.Errorf(codes.Unimplemented, "") } + +// TODO: Support GNOI CancelReboot func (srv *Server) CancelReboot(ctx context.Context, req *gnoi_system_pb.CancelRebootRequest) (*gnoi_system_pb.CancelRebootResponse, error) { _, err := authenticate(srv.config.UserAuth, ctx) if err != nil { diff --git a/gnmi_server/server.go b/gnmi_server/server.go index 7717e1d9..07ce1bef 100644 --- a/gnmi_server/server.go +++ b/gnmi_server/server.go @@ -50,6 +50,7 @@ type Config struct { LogLevel int UserAuth AuthTypes EnableTranslibWrite bool + EnableNativeWrite bool } var AuthLock sync.Mutex @@ -124,6 +125,8 @@ func NewServer(config *Config, opts []grpc.ServerOption) (*Server, error) { return nil, errors.New("config not provided") } + common_utils.InitCounters() + s := grpc.NewServer(opts...) reflection.Register(s) @@ -142,8 +145,10 @@ func NewServer(config *Config, opts []grpc.ServerOption) (*Server, error) { } gnmipb.RegisterGNMIServer(srv.s, srv) spb_jwt_gnoi.RegisterSonicJwtServiceServer(srv.s, srv) - if srv.config.EnableTranslibWrite { + if srv.config.EnableTranslibWrite || srv.config.EnableNativeWrite { gnoi_system_pb.RegisterSystemServer(srv.s, srv) + } + if srv.config.EnableTranslibWrite { spb_gnoi.RegisterSonicServiceServer(srv.s, srv) } log.V(1).Infof("Created Server on %s, read-only: %t", srv.Address(), !srv.config.EnableTranslibWrite) @@ -274,26 +279,32 @@ func (s *Server) checkEncodingAndModel(encoding gnmipb.Encoding, models []*gnmip // Get implements the Get RPC in gNMI spec. 
func (s *Server) Get(ctx context.Context, req *gnmipb.GetRequest) (*gnmipb.GetResponse, error) { + common_utils.IncCounter("GNMI get") ctx, err := authenticate(s.config.UserAuth, ctx) if err != nil { + common_utils.IncCounter("GNMI get fail") return nil, err } if req.GetType() != gnmipb.GetRequest_ALL { + common_utils.IncCounter("GNMI get fail") return nil, status.Errorf(codes.Unimplemented, "unsupported request type: %s", gnmipb.GetRequest_DataType_name[int32(req.GetType())]) } if err = s.checkEncodingAndModel(req.GetEncoding(), req.GetUseModels()); err != nil { + common_utils.IncCounter("GNMI get fail") return nil, status.Error(codes.Unimplemented, err.Error()) } var target string prefix := req.GetPrefix() if prefix == nil { + common_utils.IncCounter("GNMI get fail") return nil, status.Error(codes.Unimplemented, "No target specified in prefix") } else { target = prefix.GetTarget() if target == "" { + common_utils.IncCounter("GNMI get fail") return nil, status.Error(codes.Unimplemented, "Empty target data not supported yet") } } @@ -307,6 +318,8 @@ func (s *Server) Get(ctx context.Context, req *gnmipb.GetRequest) (*gnmipb.GetRe if target == "OTHERS" { dc, err = sdc.NewNonDbClient(paths, prefix) + } else if target == "MIXED" { + dc, err = sdc.NewMixedDbClient(paths, prefix) } else if _, ok, _, _ := sdc.IsTargetDb(target); ok { dc, err = sdc.NewDbClient(paths, prefix) } else { @@ -315,11 +328,13 @@ func (s *Server) Get(ctx context.Context, req *gnmipb.GetRequest) (*gnmipb.GetRe } if err != nil { + common_utils.IncCounter("GNMI get fail") return nil, status.Error(codes.NotFound, err.Error()) } notifications := make([]*gnmipb.Notification, len(paths)) spbValues, err := dc.Get(nil) if err != nil { + common_utils.IncCounter("GNMI get fail") return nil, status.Error(codes.NotFound, err.Error()) } @@ -339,17 +354,56 @@ func (s *Server) Get(ctx context.Context, req *gnmipb.GetRequest) (*gnmipb.GetRe } func (s *Server) Set(ctx context.Context, req *gnmipb.SetRequest) (*gnmipb.SetResponse, error) { + common_utils.IncCounter("GNMI set") + if s.config.EnableTranslibWrite == false && s.config.EnableNativeWrite == false { + common_utils.IncCounter("GNMI set fail") + return nil, grpc.Errorf(codes.Unimplemented, "GNMI is in read-only mode") + } ctx, err := authenticate(s.config.UserAuth, ctx) if err != nil { + common_utils.IncCounter("GNMI set fail") return nil, err } var results []*gnmipb.UpdateResult /* Fetch the prefix. */ prefix := req.GetPrefix() + var target string + if prefix == nil { + common_utils.IncCounter("GNMI set fail") + return nil, status.Error(codes.Unimplemented, "No target specified in prefix") + } else { + target = prefix.GetTarget() + } extensions := req.GetExtension() - /* Create Transl client. */ - dc, _ := sdc.NewTranslClient(prefix, nil, ctx, extensions) + + var dc sdc.Client + if target == "MIXED" { + if s.config.EnableNativeWrite == false { + common_utils.IncCounter("GNMI set fail") + return nil, grpc.Errorf(codes.Unimplemented, "Mixed schema is disabled") + } + paths := req.GetDelete() + for _, path := range req.GetReplace() { + paths = append(paths, path.GetPath()) + } + for _, path := range req.GetUpdate() { + paths = append(paths, path.GetPath()) + } + dc, err = sdc.NewMixedDbClient(paths, prefix) + } else { + if s.config.EnableTranslibWrite == false { + common_utils.IncCounter("GNMI set fail") + return nil, grpc.Errorf(codes.Unimplemented, "Telemetry is in read-only mode") + } + /* Create Transl client. 
*/ + dc, err = sdc.NewTranslClient(prefix, nil, ctx, extensions) + } + + if err != nil { + common_utils.IncCounter("GNMI set fail") + return nil, status.Error(codes.NotFound, err.Error()) + } /* DELETE */ for _, path := range req.GetDelete() { @@ -388,13 +442,11 @@ func (s *Server) Set(ctx context.Context, req *gnmipb.SetRequest) (*gnmipb.SetRe /* Add to Set response results. */ results = append(results, &res) } - if s.config.EnableTranslibWrite { - err = dc.Set(req.GetDelete(), req.GetReplace(), req.GetUpdate()) - } else { - return nil, grpc.Errorf(codes.Unimplemented, "Telemetry is in read-only mode") + err = dc.Set(req.GetDelete(), req.GetReplace(), req.GetUpdate()) + if err != nil { + common_utils.IncCounter("GNMI set fail") } - return &gnmipb.SetResponse{ Prefix: req.GetPrefix(), Response: results, @@ -408,10 +460,14 @@ func (s *Server) Capabilities(ctx context.Context, req *gnmipb.CapabilityRequest return nil, err } extensions := req.GetExtension() - dc, _ := sdc.NewTranslClient(nil, nil, ctx, extensions) /* Fetch the client capabitlities. */ - supportedModels := dc.Capabilities() + var supportedModels []gnmipb.ModelData + dc, _ := sdc.NewTranslClient(nil, nil, ctx, extensions) + supportedModels = append(supportedModels, dc.Capabilities()...) + dc, _ = sdc.NewMixedDbClient(nil, nil) + supportedModels = append(supportedModels, dc.Capabilities()...) + suppModels := make([]*gnmipb.ModelData, len(supportedModels)) for index, model := range supportedModels { diff --git a/gnmi_server/server_test.go b/gnmi_server/server_test.go index 3a0c0da8..e1a4088c 100644 --- a/gnmi_server/server_test.go +++ b/gnmi_server/server_test.go @@ -5,6 +5,7 @@ package gnmi import ( "crypto/tls" "encoding/json" + "path/filepath" "flag" "fmt" "strings" @@ -40,11 +41,13 @@ import ( sgpb "github.com/sonic-net/sonic-gnmi/proto/gnoi" sdc "github.com/sonic-net/sonic-gnmi/sonic_data_client" sdcfg "github.com/sonic-net/sonic-gnmi/sonic_db_config" + "github.com/sonic-net/sonic-gnmi/common_utils" "github.com/sonic-net/sonic-gnmi/test_utils" gclient "github.com/jipanyang/gnmi/client/gnmi" "github.com/jipanyang/gnxi/utils/xpath" gnoi_system_pb "github.com/openconfig/gnoi/system" "github.com/agiledragon/gomonkey/v2" + "github.com/godbus/dbus/v5" ) var clientTypes = []string{gclient.Type} @@ -99,7 +102,26 @@ func createServer(t *testing.T, port int64) *Server { } opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} - cfg := &Config{Port: port, EnableTranslibWrite: true} + cfg := &Config{Port: port, EnableTranslibWrite: true, EnableNativeWrite: true} + s, err := NewServer(cfg, opts) + if err != nil { + t.Errorf("Failed to create gNMI server: %v", err) + } + return s +} + +func createReadServer(t *testing.T, port int64) *Server { + certificate, err := testcert.NewCert() + if err != nil { + t.Errorf("could not load server key pair: %s", err) + } + tlsCfg := &tls.Config{ + ClientAuth: tls.RequestClientCert, + Certificates: []tls.Certificate{certificate}, + } + + opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} + cfg := &Config{Port: port, EnableTranslibWrite: false} s, err := NewServer(cfg, opts) if err != nil { t.Errorf("Failed to create gNMI server: %v", err) @@ -118,7 +140,7 @@ func createAuthServer(t *testing.T, port int64) *Server { } opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} - cfg := &Config{Port: port, UserAuth: AuthTypes{"password": true, "cert": true, "jwt": true}} + cfg := &Config{Port: port, EnableTranslibWrite: true, UserAuth: AuthTypes{"password": true, 
"cert": true, "jwt": true}} s, err := NewServer(cfg, opts) if err != nil { t.Errorf("Failed to create gNMI server: %v", err) @@ -126,6 +148,24 @@ func createAuthServer(t *testing.T, port int64) *Server { return s } +func createInvalidServer(t *testing.T, port int64) *Server { + certificate, err := testcert.NewCert() + if err != nil { + t.Errorf("could not load server key pair: %s", err) + } + tlsCfg := &tls.Config{ + ClientAuth: tls.RequestClientCert, + Certificates: []tls.Certificate{certificate}, + } + + opts := []grpc.ServerOption{grpc.Creds(credentials.NewTLS(tlsCfg))} + s, err := NewServer(nil, opts) + if err != nil { + return nil + } + return s +} + // runTestGet requests a path from the server by Get grpc call, and compares if // the return code and response value are expected. func runTestGet(t *testing.T, ctx context.Context, gClient pb.GNMIClient, pathTarget string, @@ -218,12 +258,13 @@ func runTestSet(t *testing.T, ctx context.Context, gClient pb.GNMIClient, pathTa req := &pb.SetRequest{} switch op { case Replace: - //prefix := pb.Path{Target: pathTarget} + prefix := pb.Path{Target: pathTarget} var v *pb.TypedValue v = &pb.TypedValue{ Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: extractJSON(attributeData)}} req = &pb.SetRequest{ + Prefix: &prefix, Replace: []*pb.Update{&pb.Update{Path: &pbPath, Val: v}}, } case Delete: @@ -732,6 +773,102 @@ func TestGnmiSet(t *testing.T) { s.s.Stop() } +func TestGnmiSetReadOnly(t *testing.T) { + s := createReadServer(t, 8081) + go runServer(t, s) + defer s.s.Stop() + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + opts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))} + + targetAddr := "127.0.0.1:8081" + conn, err := grpc.Dial(targetAddr, opts...) + if err != nil { + t.Fatalf("Dialing to %q failed: %v", targetAddr, err) + } + defer conn.Close() + + gClient := pb.NewGNMIClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &pb.SetRequest{} + _, err = gClient.Set(ctx, req) + gotRetStatus, ok := status.FromError(err) + if !ok { + t.Fatal("got a non-grpc error from grpc call") + } + wantRetCode := codes.Unimplemented + if gotRetStatus.Code() != wantRetCode { + t.Log("err: ", err) + t.Fatalf("got return code %v, want %v", gotRetStatus.Code(), wantRetCode) + } +} + +func TestGnmiSetAuthFail(t *testing.T) { + s := createAuthServer(t, 8081) + go runServer(t, s) + defer s.s.Stop() + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + opts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))} + + targetAddr := "127.0.0.1:8081" + conn, err := grpc.Dial(targetAddr, opts...) 
+ if err != nil { + t.Fatalf("Dialing to %q failed: %v", targetAddr, err) + } + defer conn.Close() + + gClient := pb.NewGNMIClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &pb.SetRequest{} + _, err = gClient.Set(ctx, req) + gotRetStatus, ok := status.FromError(err) + if !ok { + t.Fatal("got a non-grpc error from grpc call") + } + wantRetCode := codes.Unauthenticated + if gotRetStatus.Code() != wantRetCode { + t.Log("err: ", err) + t.Fatalf("got return code %v, want %v", gotRetStatus.Code(), wantRetCode) + } +} + +func TestGnmiGetAuthFail(t *testing.T) { + s := createAuthServer(t, 8081) + go runServer(t, s) + defer s.s.Stop() + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + opts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))} + + targetAddr := "127.0.0.1:8081" + conn, err := grpc.Dial(targetAddr, opts...) + if err != nil { + t.Fatalf("Dialing to %q failed: %v", targetAddr, err) + } + defer conn.Close() + + gClient := pb.NewGNMIClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &pb.GetRequest{} + _, err = gClient.Get(ctx, req) + gotRetStatus, ok := status.FromError(err) + if !ok { + t.Fatal("got a non-grpc error from grpc call") + } + wantRetCode := codes.Unauthenticated + if gotRetStatus.Code() != wantRetCode { + t.Log("err: ", err) + t.Fatalf("got return code %v, want %v", gotRetStatus.Code(), wantRetCode) + } +} + func runGnmiTestGet(t *testing.T, namespace string) { //t.Log("Start gNMI client") tlsConfig := &tls.Config{InsecureSkipVerify: true} @@ -2574,6 +2711,7 @@ func TestAuthCapabilities(t *testing.T) { s := createAuthServer(t, 8089) go runServer(t, s) + defer s.s.Stop() currentUser, _ := user.Current() tlsConfig := &tls.Config{InsecureSkipVerify: true} @@ -2599,7 +2737,6 @@ func TestAuthCapabilities(t *testing.T) { if len(resp.SupportedModels) == 0 { t.Fatalf("No Supported Models found!") } - } func TestClient(t *testing.T) { @@ -2720,6 +2857,148 @@ func TestClient(t *testing.T) { s.s.Stop() } +func TestGnmiSetBatch(t *testing.T) { + mockCode := +` +print('No Yang validation for test mode...') +print('%s') +` + mock1 := gomonkey.ApplyGlobalVar(&sdc.PyCodeForYang, mockCode) + defer mock1.Reset() + + s := createServer(t, 8090) + go runServer(t, s) + + prepareDbTranslib(t) + + //t.Log("Start gNMI client") + tlsConfig := &tls.Config{InsecureSkipVerify: true} + opts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))} + + targetAddr := "127.0.0.1:8090" + conn, err := grpc.Dial(targetAddr, opts...) 
+ if err != nil { + t.Fatalf("Dialing to %q failed: %v", targetAddr, err) + } + defer conn.Close() + + gClient := pb.NewGNMIClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + var emptyRespVal interface{} + + tds := []struct { + desc string + pathTarget string + textPbPath string + wantRetCode codes.Code + wantRespVal interface{} + attributeData string + operation op_t + valTest bool + }{ + { + desc: "Set APPL_DB in batch", + pathTarget: "MIXED", + textPbPath: ` + origin: "sonic-db", + elem: elem: + `, + attributeData: "../testdata/batch.txt", + wantRetCode: codes.OK, + wantRespVal: emptyRespVal, + operation: Replace, + valTest: false, + }, + } + + for _, td := range tds { + if td.valTest == true { + // wait for 2 seconds for change to sync + time.Sleep(2 * time.Second) + t.Run(td.desc, func(t *testing.T) { + runTestGet(t, ctx, gClient, td.pathTarget, td.textPbPath, td.wantRetCode, td.wantRespVal, td.valTest) + }) + } else { + t.Run(td.desc, func(t *testing.T) { + runTestSet(t, ctx, gClient, td.pathTarget, td.textPbPath, td.wantRetCode, td.wantRespVal, td.attributeData, td.operation) + }) + } + } + s.s.Stop() +} + +func TestGNMINative(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + mockCode := +` +print('No Yang validation for test mode...') +print('%s') +` + mock3 := gomonkey.ApplyGlobalVar(&sdc.PyCodeForYang, mockCode) + defer mock3.Reset() + + s := createServer(t, 8080) + go runServer(t, s) + defer s.s.Stop() + + path, _ := os.Getwd() + path = filepath.Dir(path) + + var cmd *exec.Cmd + cmd = exec.Command("bash", "-c", "cd "+path+" && "+"pytest") + if result, err := cmd.Output(); err != nil { + fmt.Println(string(result)) + t.Errorf("Fail to execute pytest: %v", err) + } else { + fmt.Println(string(result)) + } + + var counters [len(common_utils.CountersName)]uint64 + err := common_utils.GetMemCounters(&counters) + if err != nil { + t.Errorf("Error: Fail to read counters, %v", err) + } + for i := 0; i < len(common_utils.CountersName); i++ { + if common_utils.CountersName[i] == "GNMI set" && counters[i] == 0 { + t.Errorf("GNMI set counter should not be 0") + } + if common_utils.CountersName[i] == "GNMI get" && counters[i] == 0 { + t.Errorf("GNMI get counter should not be 0") + } + } + s.s.Stop() +} + +func TestServerPort(t *testing.T) { + s := createServer(t, -8080) + port := s.Port() + if port != 0 { + t.Errorf("Invalid port: %d", port) + } + s.s.Stop() +} + +func TestInvalidServer(t *testing.T) { + s := createInvalidServer(t, 9000) + if s != nil { + t.Errorf("Should not create invalid server") + } +} + func init() { // Enable logs at UT setup flag.Lookup("v").Value.Set("10") diff --git a/gnoi_client/gnoi_client.go b/gnoi_client/gnoi_client.go index 286ba50d..31ea33da 100644 --- a/gnoi_client/gnoi_client.go +++ b/gnoi_client/gnoi_client.go @@ -51,6 +51,12 @@ func main() { switch *rpc { case "Time": systemTime(sc, ctx) + case "Reboot": + systemReboot(sc, ctx) + case "CancelReboot": + systemCancelReboot(sc, ctx) + case "RebootStatus": + systemRebootStatus(sc, ctx) default: panic("Invalid RPC 
Name") } @@ -103,6 +109,48 @@ func systemTime(sc gnoi_system_pb.SystemClient, ctx context.Context) { fmt.Println(string(respstr)) } +func systemReboot(sc gnoi_system_pb.SystemClient, ctx context.Context) { + fmt.Println("System Reboot") + ctx = setUserCreds(ctx) + req := &gnoi_system_pb.RebootRequest {} + json.Unmarshal([]byte(*args), req) + _,err := sc.Reboot(ctx, req) + if err != nil { + panic(err.Error()) + } +} + +func systemCancelReboot(sc gnoi_system_pb.SystemClient, ctx context.Context) { + fmt.Println("System CancelReboot") + ctx = setUserCreds(ctx) + req := &gnoi_system_pb.CancelRebootRequest {} + json.Unmarshal([]byte(*args), req) + resp,err := sc.CancelReboot(ctx, req) + if err != nil { + panic(err.Error()) + } + respstr, err := json.Marshal(resp) + if err != nil { + panic(err.Error()) + } + fmt.Println(string(respstr)) +} + +func systemRebootStatus(sc gnoi_system_pb.SystemClient, ctx context.Context) { + fmt.Println("System RebootStatus") + ctx = setUserCreds(ctx) + req := &gnoi_system_pb.RebootStatusRequest {} + resp,err := sc.RebootStatus(ctx, req) + if err != nil { + panic(err.Error()) + } + respstr, err := json.Marshal(resp) + if err != nil { + panic(err.Error()) + } + fmt.Println(string(respstr)) +} + func sonicShowTechSupport(sc spb.SonicServiceClient, ctx context.Context) { fmt.Println("Sonic ShowTechsupport") ctx = setUserCreds(ctx) diff --git a/go.mod b/go.mod index 8cf9726b..e347647f 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/c9s/goprocinfo v0.0.0-20191125144613-4acdd056c72d github.com/dgrijalva/jwt-go v3.2.1-0.20210802184156-9742bd7fca1c+incompatible github.com/go-redis/redis v6.15.6+incompatible + github.com/godbus/dbus/v5 v5.1.0 github.com/gogo/protobuf v1.3.2 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.3 diff --git a/libcswsscommon/.gitignore b/libcswsscommon/.gitignore new file mode 100644 index 00000000..86fd896e --- /dev/null +++ b/libcswsscommon/.gitignore @@ -0,0 +1,3 @@ +.depend +*.o +*.a diff --git a/libcswsscommon/Makefile b/libcswsscommon/Makefile new file mode 100644 index 00000000..34d89411 --- /dev/null +++ b/libcswsscommon/Makefile @@ -0,0 +1,29 @@ +RM=rm -f + +INCLUDES= \ + -Iinclude \ + -I$(SWSSCOMMON_SRC)/common \ + -I/usr/include/swss + +CPPFLAGS=-g -std=c++11 $(INCLUDES) +LDFLAGS=-g -shared + +SRCS=src/dbconnector.cpp src/producertable.cpp src/producerstatetable.cpp src/table.cpp +OBJS=$(SRCS:.cpp=.o) +%.o: %.cpp + $(CXX) $(CPPFLAGS) -c $< -o $@ + +.PHONY: all swsscommon clean install + +all: swsscommon + +swsscommon: $(OBJS) + $(AR) rvs libcswsscommon.a $(OBJS) + +install: swsscommon + cp libcswsscommon.a /usr/lib # FIXME: better use install + cp -r include/* /usr/include + +clean: + $(RM) $(OBJS) + $(RM) libcswsscommon.a diff --git a/libcswsscommon/include/capi/dbconnector.h b/libcswsscommon/include/capi/dbconnector.h new file mode 100644 index 00000000..51ebbf04 --- /dev/null +++ b/libcswsscommon/include/capi/dbconnector.h @@ -0,0 +1,36 @@ +#ifndef _C_DBCONNECTOR_H +#define _C_DBCONNECTOR_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *db_connector_t; + +// DBConnector::DBConnector(int db, std::string hostname, int port, unsigned int timeout) +db_connector_t db_connector_new(int db, const char *hostname, int port, unsigned int timeout); +// DBConnector::DBConnector(int db, std::string unixPath, unsigned int timeout) +db_connector_t db_connector_new2(int db, const char *unixPath, unsigned int timeout); + +// DBConnector::~DBConnector() 
+void db_connector_delete(db_connector_t db); + +// redisContext *DBConnector::getContext() +redisContext *db_connector_get_context(db_connector_t db); +// int DBConnector::getDB() +int db_connector_get_db(db_connector_t db); + +// static void DBConnector::select(DBConnector *db) +void db_connector_select(db_connector_t db); + +// DBConnector *DBConnector::newConnector(unsigned int timeout); +db_connector_t db_connector_new_connector(db_connector_t db, unsigned int timeout); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libcswsscommon/include/capi/producerstatetable.h b/libcswsscommon/include/capi/producerstatetable.h new file mode 100644 index 00000000..7fc48505 --- /dev/null +++ b/libcswsscommon/include/capi/producerstatetable.h @@ -0,0 +1,54 @@ +#ifndef _C_PRODUCERSTATETABLE_H +#define _C_PRODUCERSTATETABLE_H + +#include +#include + +#include "producertable.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *db_connector_t2; +typedef void *redis_pipeline_t; +typedef void *producer_state_table_t; + +// ProducerStateTable::ProducerStateTable(DBConnector *db, std::string tableName) +producer_state_table_t producer_state_table_new(db_connector_t2 db, const char *tableName); +// ProducerStateTable::ProducerStateTable(RedisPipeline *pipeline, std::string tableName, bool buffered = false) +producer_state_table_t producer_state_table_new2(redis_pipeline_t pipeline, const char *tableName, bool buffered); + +// ProducerStateTable::~ProducerStateTable() +void producer_state_table_delete(producer_state_table_t pt); + +// void ProducerStateTable::setBuffered(bool buffered) +void producer_state_table_set_buffered(producer_state_table_t pt, bool buffered); + +// void ProducerStateTable::set(std::string key, +// std::vector &values, +// std::string op = SET_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void producer_state_table_set(producer_state_table_t pt, + const char *key, + const field_value_tuple_t *values, + size_t count, + const char *op, + const char *prefix); + +// void ProducerStateTable::del(std::string key, +// std::string op = DEL_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void producer_state_table_del(producer_state_table_t pt, + const char *key, + const char *op, + const char *prefix); + +// void ProducerStateTable::flush() +void producer_state_table_flush(producer_state_table_t pt); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libcswsscommon/include/capi/producertable.h b/libcswsscommon/include/capi/producertable.h new file mode 100644 index 00000000..ba718177 --- /dev/null +++ b/libcswsscommon/include/capi/producertable.h @@ -0,0 +1,60 @@ +#ifndef _C_PRODUCERTABLE_H +#define _C_PRODUCERTABLE_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *db_connector_t; +typedef void *redis_pipeline_t; +typedef void *producer_table_t; + +typedef struct field_value_tuple +{ + const char *field; + const char *value; +} field_value_tuple_t; + +// ProducerTable::ProducerTable(DBConnector *db, std::string tableName) +producer_table_t producer_table_new(db_connector_t db, const char *tableName); +// ProducerTable::ProducerTable(RedisPipeline *pipeline, std::string tableName, bool buffered = false) +producer_table_t producer_table_new2(redis_pipeline_t pipeline, const char *tableName, bool buffered); +// ProducerTable::ProducerTable(DBConnector *db, std::string tableName, std::string dumpFile) +producer_table_t producer_table_new3(db_connector_t db, const char *tableName, const char *dumpFile); + +// 
ProducerTable::~ProducerTable() +void producer_table_delete(producer_table_t pt); + +// void ProducerTable::setBuffered(bool buffered) +void producer_table_set_buffered(producer_table_t pt, bool buffered); + +// void ProducerTable::set(std::string key, +// std::vector &values, +// std::string op = SET_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void producer_table_set(producer_table_t pt, + const char *key, + const field_value_tuple_t *values, + size_t count, + const char *op, + const char *prefix); + +// void ProducerTable::del(std::string key, +// std::string op = DEL_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void producer_table_del(producer_table_t pt, + const char *key, + const char *op, + const char *prefix); + +// void ProducerTable::flush() +void producer_table_flush(producer_table_t pt); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/libcswsscommon/include/capi/table.h b/libcswsscommon/include/capi/table.h new file mode 100644 index 00000000..a15e8b3a --- /dev/null +++ b/libcswsscommon/include/capi/table.h @@ -0,0 +1,54 @@ +#ifndef _C_TABLE_H +#define _C_TABLE_H + +#include +#include + +#include "producertable.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *db_connector_t2; +typedef void *redis_pipeline_t; +typedef void *table_t; + +// ProducerStateTable::ProducerStateTable(DBConnector *db, std::string tableName) +table_t table_new(db_connector_t2 db, const char *tableName); +// ProducerStateTable::ProducerStateTable(RedisPipeline *pipeline, std::string tableName, bool buffered = false) +table_t table_new2(redis_pipeline_t pipeline, const char *tableName, bool buffered); + +// ProducerStateTable::~ProducerStateTable() +void table_delete(table_t pt); + +// void ProducerStateTable::setBuffered(bool buffered) +void table_set_buffered(table_t pt, bool buffered); + +// void ProducerStateTable::set(std::string key, +// std::vector &values, +// std::string op = SET_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void table_set(table_t pt, + const char *key, + const field_value_tuple_t *values, + size_t count, + const char *op, + const char *prefix); + +// void ProducerStateTable::del(std::string key, +// std::string op = DEL_COMMAND, +// std::string prefix = EMPTY_PREFIX) +void table_del(table_t pt, + const char *key, + const char *op, + const char *prefix); + +// void ProducerStateTable::flush() +void table_flush(table_t pt); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/patches/gnmi_path.patch b/patches/gnmi_path.patch new file mode 100644 index 00000000..8098aeee --- /dev/null +++ b/patches/gnmi_path.patch @@ -0,0 +1,39 @@ +Update path.go to support origin field, +use ":" in the first element to get origin field +--- ./github.com/jipanyang/gnxi/utils/xpath/path.go ++++ ./github.com/jipanyang/gnxi/utils/xpath/path.go +@@ -18,6 +18,7 @@ package xpath + + import ( + "fmt" ++ "strings" + + pb "github.com/openconfig/gnmi/proto/gnmi" + ) +@@ -45,9 +46,18 @@ func ToGNMIPath(xpath string) (*pb.Path, error) { + return nil, err + } + var pbPathElements []*pb.PathElem +- for _, elem := range xpathElements { ++ // Add support for path origin ++ origin := "" ++ for i, elem := range xpathElements { + switch v := elem.(type) { + case string: ++ if i == 0 { ++ firstElement := strings.SplitN(v, ":", 2) ++ if len(firstElement) == 2 { ++ origin = firstElement[0] ++ v = firstElement[1] ++ } ++ } + pbPathElements = append(pbPathElements, &pb.PathElem{Name: v}) + case map[string]string: + n := len(pbPathElements) +@@ -62,5 +71,5 @@ func ToGNMIPath(xpath 
string) (*pb.Path, error) { + return nil, fmt.Errorf("wrong data type: %T", v) + } + } +- return &pb.Path{Elem: pbPathElements}, nil ++ return &pb.Path{Origin: origin, Elem: pbPathElements}, nil + } diff --git a/patches/gnmi_set.patch b/patches/gnmi_set.patch index 48b3c31f..dc913476 100644 --- a/patches/gnmi_set.patch +++ b/patches/gnmi_set.patch @@ -12,7 +12,7 @@ pb "github.com/openconfig/gnmi/proto/gnmi" ) -@@ -54,16 +55,37 @@ +@@ -54,22 +55,45 @@ targetAddr = flag.String("target_addr", "localhost:10161", "The target address in the format of host:port") targetName = flag.String("target_name", "hostname.com", "The target name use to verify the hostname returned by TLS handshake") timeOut = flag.Duration("time_out", 10*time.Second, "Timeout for the Get request, 10 seconds by default") @@ -33,10 +33,10 @@ + if lc == -1 { + log.Exitf("invalid path-value pair: %v", item) + } -+ pathValuePair_r := strings.SplitN(modName[2], ":", 2) ++ // pathValuePair_r := strings.SplitN(modName[2], ":", 2) + // pathValuePair[0] = modName[2][0:lc] + -+ pathValuePair[0] = "/" + modName[1] + "/" + pathValuePair_r[0] ++ pathValuePair[0] = "/" + modName[1] + "/" + modName[2][0:lc] + + pathValuePair[1] = modName[2][lc+1:] + fmt.Println(pathValuePair[0]) @@ -52,7 +52,16 @@ pbPath, err := xpath.ToGNMIPath(pathValuePair[0]) if err != nil { log.Exitf("error in parsing xpath %q to gnmi path", pathValuePair[0]) -@@ -144,8 +166,10 @@ + } + var pbVal *pb.TypedValue +- if pathValuePair[1][0] == '@' { ++ if pathValuePair[1][0] == '#' { ++ pbVal = nil ++ } else if pathValuePair[1][0] == '@' { + jsonFile := pathValuePair[1][1:] + jsonConfig, err := ioutil.ReadFile(jsonFile) + if err != nil { +@@ -144,8 +168,10 @@ } replaceList := buildPbUpdateList(replaceOpt) updateList := buildPbUpdateList(updateOpt) @@ -64,7 +73,7 @@ Delete: deleteList, Replace: replaceList, Update: updateList, -@@ -155,11 +179,17 @@ +@@ -155,11 +181,17 @@ utils.PrintProto(setRequest) cli := pb.NewGNMIClient(conn) diff --git a/patches/gnmi_xpath.patch b/patches/gnmi_xpath.patch new file mode 100644 index 00000000..6e36443d --- /dev/null +++ b/patches/gnmi_xpath.patch @@ -0,0 +1,21 @@ +Use escaped '/' to support ip prefix in path element +--- ./github.com/jipanyang/gnxi/utils/xpath/xpath.go ++++ ./github.com/jipanyang/gnxi/utils/xpath/xpath.go +@@ -88,11 +88,15 @@ + for end < len(str) { + switch str[end] { + case '/': +- if !insideBrackets { ++ if end != 0 && str[end-1] == '\\' { ++ // Ignore escaped '/' ++ end++ ++ } else if !insideBrackets { + // Current '/' is a valid path element + // separator. 
+ if end > begin { +- path = append(path, str[begin:end]) ++ val := strings.Replace(str[begin:end], `\/`, `/`, -1) ++ path = append(path, val) + } + end++ + begin = end diff --git a/sonic_data_client/client_test.go b/sonic_data_client/client_test.go new file mode 100644 index 00000000..661ec0a3 --- /dev/null +++ b/sonic_data_client/client_test.go @@ -0,0 +1,349 @@ +package client + +import ( + "testing" + "os" + "reflect" + "io/ioutil" + "encoding/json" + + "github.com/jipanyang/gnxi/utils/xpath" + gnmipb "github.com/openconfig/gnmi/proto/gnmi" +) + +var testFile string = "/etc/sonic/ut.cp.json" + +func JsonEqual(a, b []byte) (bool, error) { + var j1, j2 interface{} + var err error + if err = json.Unmarshal(a, &j1); err != nil { + return false, err + } + if err = json.Unmarshal(b, &j2); err != nil { + return false, err + } + return reflect.DeepEqual(j1, j2), nil +} + +func TestJsonClientNegative(t *testing.T) { + os.Remove(testFile) + _, err := NewJsonClient(testFile) + if err == nil { + t.Errorf("Should fail without checkpoint") + } + + text := "{" + err = ioutil.WriteFile(testFile, []byte(text), 0644) + if err != nil { + t.Errorf("Fail to create test file") + } + _, err = NewJsonClient(testFile) + if err == nil { + t.Errorf("Should fail with invalid checkpoint") + } +} + +func TestJsonAdd(t *testing.T) { + text := "{}" + err := ioutil.WriteFile(testFile, []byte(text), 0644) + if err != nil { + t.Errorf("Fail to create test file") + } + client, err := NewJsonClient(testFile) + if err != nil { + t.Errorf("Create client fail: %v", err) + } + path_list := [][]string { + []string { + "DASH_QOS", + }, + []string { + "DASH_QOS", + "qos_02", + }, + []string { + "DASH_QOS", + "qos_03", + "bw", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + }, + []string { + "DASH_VNET", + "vnet002", + "address_spaces", + "0", + }, + } + value_list := []string { + `{"qos_01": {"bw": "54321", "cps": "1000", "flows": "300"}}`, + `{"bw": "10001", "cps": "1001", "flows": "101"}`, + `"20001"`, + `["10.250.0.0", "192.168.3.0", "139.66.72.9"]`, + `"6.6.6.6"`, + } + for i := 0; i < len(path_list); i++ { + path := path_list[i] + value := value_list[i] + err = client.Add(path, value) + if err != nil { + t.Errorf("Add %v fail: %v", path, err) + } + res, err := client.Get(path) + if err != nil { + t.Errorf("Get %v fail: %v", path, err) + } + ok, err := JsonEqual([]byte(value), res) + if err != nil { + t.Errorf("Compare json fail: %v", err) + return + } + if ok != true { + t.Errorf("%v and %v do not match", value, string(res)) + } + } +} + +func TestJsonAddNegative(t *testing.T) { + text := "{}" + err := ioutil.WriteFile(testFile, []byte(text), 0644) + if err != nil { + t.Errorf("Fail to create test file") + } + client, err := NewJsonClient(testFile) + if err != nil { + t.Errorf("Create client fail: %v", err) + } + path_list := [][]string { + []string { + "DASH_QOS", + }, + []string { + "DASH_QOS", + "qos_02", + }, + []string { + "DASH_QOS", + "qos_03", + "bw", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + }, + []string { + "DASH_VNET", + "vnet002", + "address_spaces", + "0", + }, + []string { + "DASH_VNET", + "vnet002", + "address_spaces", + "abc", + }, + []string { + "DASH_VNET", + "vnet002", + "address_spaces", + "100", + }, + } + value_list := []string { + `{"qos_01": {"bw": "54321", "cps": "1000", "flows": "300"}`, + `{"bw": "10001", "cps": "1001", "flows": "101"`, + `20001`, + `["10.250.0.0", "192.168.3.0", "139.66.72.9"`, + `"6.6.6.6`, + `"6.6.6.6"`, + `"6.6.6.6"`, + } + for i 
:= 0; i < len(path_list); i++ { + path := path_list[i] + value := value_list[i] + err = client.Add(path, value) + if err == nil { + t.Errorf("Add %v should fail: %v", path, err) + } + } +} + +func TestJsonRemove(t *testing.T) { + text := "{}" + err := ioutil.WriteFile(testFile, []byte(text), 0644) + if err != nil { + t.Errorf("Fail to create test file") + } + client, err := NewJsonClient(testFile) + if err != nil { + t.Errorf("Create client fail: %v", err) + } + path_list := [][]string { + []string { + "DASH_QOS", + }, + []string { + "DASH_QOS", + "qos_02", + }, + []string { + "DASH_QOS", + "qos_03", + "bw", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + }, + []string { + "DASH_VNET", + "vnet002", + "address_spaces", + "0", + }, + } + value_list := []string { + `{"qos_01": {"bw": "54321", "cps": "1000", "flows": "300"}}`, + `{"bw": "10001", "cps": "1001", "flows": "101"}`, + `"20001"`, + `["10.250.0.0", "192.168.3.0", "139.66.72.9"]`, + `"6.6.6.6"`, + } + for i := 0; i < len(path_list); i++ { + path := path_list[i] + value := value_list[i] + err = client.Add(path, value) + if err != nil { + t.Errorf("Add %v fail: %v", path, err) + } + err = client.Remove(path) + if err != nil { + t.Errorf("Remove %v fail: %v", path, err) + } + _, err := client.Get(path) + if err == nil { + t.Errorf("Get %v should fail: %v", path, err) + } + } +} + +func TestJsonRemoveNegative(t *testing.T) { + text := "{}" + err := ioutil.WriteFile(testFile, []byte(text), 0644) + if err != nil { + t.Errorf("Fail to create test file") + } + client, err := NewJsonClient(testFile) + if err != nil { + t.Errorf("Create client fail: %v", err) + } + path_list := [][]string { + []string { + "DASH_QOS", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + }, + } + value_list := []string { + `{"qos_01": {"bw": "54321", "cps": "1000", "flows": "300"}}`, + `["10.250.0.0", "192.168.3.0", "139.66.72.9"]`, + } + for i := 0; i < len(path_list); i++ { + path := path_list[i] + value := value_list[i] + err = client.Add(path, value) + if err != nil { + t.Errorf("Add %v fail: %v", path, err) + } + } + + remove_list := [][]string { + []string { + "DASH_QOS", + "qos_02", + }, + []string { + "DASH_QOS", + "qos_03", + "bw", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + "abc", + }, + []string { + "DASH_VNET", + "vnet001", + "address_spaces", + "100", + }, + } + for i := 0; i < len(remove_list); i++ { + path := remove_list[i] + err = client.Remove(path) + if err == nil { + t.Errorf("Remove %v should fail: %v", path, err) + } + } +} + +func TestParseOrigin(t *testing.T) { + var test_paths []*gnmipb.Path + var err error + + _, err = ParseOrigin("test", test_paths) + if err != nil { + t.Errorf("ParseOrigin failed for empty path: %v", err) + } + + test_origin := "sonic-test" + path, err := xpath.ToGNMIPath(test_origin + ":CONFIG_DB/VLAN") + test_paths = append(test_paths, path) + origin, err := ParseOrigin("", test_paths) + if err != nil { + t.Errorf("ParseOrigin failed to get origin: %v", err) + } + if origin != test_origin { + t.Errorf("ParseOrigin return wrong origin: %v", origin) + } + origin, err = ParseOrigin("sonic-invalid", test_paths) + if err == nil { + t.Errorf("ParseOrigin should fail for conflict") + } +} + +func TestParseTarget(t *testing.T) { + var test_paths []*gnmipb.Path + var err error + + _, err = ParseTarget("test", test_paths) + if err != nil { + t.Errorf("ParseTarget failed for empty path: %v", err) + } + + test_target := "TEST_DB" + path, err := 
xpath.ToGNMIPath("sonic-db:" + test_target + "/VLAN") + test_paths = append(test_paths, path) + target, err := ParseTarget("", test_paths) + if err != nil { + t.Errorf("ParseTarget failed to get target: %v", err) + } + if target != test_target { + t.Errorf("ParseTarget return wrong target: %v", target) + } + target, err = ParseTarget("INVALID_DB", test_paths) + if err == nil { + t.Errorf("ParseTarget should fail for conflict") + } +} diff --git a/sonic_data_client/db_client.go b/sonic_data_client/db_client.go index a7e5a862..3974e625 100644 --- a/sonic_data_client/db_client.go +++ b/sonic_data_client/db_client.go @@ -87,6 +87,9 @@ type tablePath struct { tableKey string delimitor string field string + value string + index int + operation int // path name to be used in json data which may be different // from the real data path. Ex. in Counters table, real tableKey // is oid:0x####, while key name like Ethernet## may be put diff --git a/sonic_data_client/json_client.go b/sonic_data_client/json_client.go new file mode 100644 index 00000000..ba6e3666 --- /dev/null +++ b/sonic_data_client/json_client.go @@ -0,0 +1,379 @@ +package client + +import ( + "os" + "fmt" + "strconv" + "io/ioutil" + "encoding/json" + + log "github.com/golang/glog" +) + +type JsonClient struct { + jsonData map[string]interface{} +} + +func DecodeJsonTable(database map[string]interface{}, tableName string) (map[string]interface{}, error) { + vtable, ok := database[tableName] + if !ok { + log.V(2).Infof("Invalid database %v -> %v", tableName, database) + return nil, fmt.Errorf("Invalid database %v -> %v", tableName, database) + } + v, ok := vtable.(map[string]interface{}) + if !ok { + log.V(2).Infof("Invalid table %v", vtable) + return nil, fmt.Errorf("Invalid table %v", vtable) + } + return v, nil +} + +func DecodeJsonEntry(table map[string]interface{}, entryName string) (map[string]interface{}, error) { + ventry, ok := table[entryName] + if !ok { + log.V(2).Infof("Invalid entry %v", table) + return nil, fmt.Errorf("Invalid entry %v", table) + } + v, ok := ventry.(map[string]interface{}) + if !ok { + log.V(2).Infof("Invalid entry %v", ventry) + return nil, fmt.Errorf("Invalid entry %v", ventry) + } + return v, nil +} + +func DecodeJsonField(entry map[string]interface{}, fieldName string) (*string, []interface{}, error) { + vfield, ok := entry[fieldName] + if !ok { + log.V(2).Infof("Invalid entry %v", entry) + return nil, nil, fmt.Errorf("Invalid entry %v", entry) + } + str, ok := vfield.(string) + if ok { + return &str, nil, nil + } + list, ok := vfield.([]interface{}) + if ok { + return nil, list, nil + } + return nil, nil, fmt.Errorf("Invalid field %v", vfield) +} + +func DecodeJsonListItem(list []interface{}, index string) (*string, error) { + id, err := strconv.Atoi(index) + if err != nil { + log.V(2).Infof("Invalid index %v", index) + return nil, fmt.Errorf("Invalid index %v", index) + } + if id < 0 || id >= len(list) { + log.V(2).Infof("Invalid index %v", index) + return nil, fmt.Errorf("Invalid index %v", index) + } + vitem := list[id] + str, ok := vitem.(string) + if ok { + return &str, nil + } + return nil, fmt.Errorf("Invalid item %v", vitem) +} + +func NewJsonClient(fileName string) (*JsonClient, error) { + var client JsonClient + + jsonFile, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer jsonFile.Close() + + jsonData, err := ioutil.ReadAll(jsonFile) + if err!= nil { + return nil, err + } + res, err := parseJson([]byte(jsonData)) + if err != nil { + return nil, err + } + var ok bool + 
client.jsonData, ok = res.(map[string]interface{}) + if !ok { + log.V(2).Infof("Invalid checkpoint %v", fileName) + return nil, fmt.Errorf("Invalid checkpoint %v", fileName) + } + + return &client, nil +} + +func (c *JsonClient) Get(path []string) ([]byte, error) { + // The expect real db path could be in one of the formats: + // <1> DB Table + // <2> DB Table Key + // <3> DB Table Key Field + // <4> DB Table Key Field Index + jv := []byte{} + switch len(path) { + case 1: // only table name provided + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return nil, err + } + jv, err = emitJSON(&vtable) + if err != nil { + return nil, err + } + case 2: // Second element must be table key + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return nil, err + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + return nil, err + } + jv, err = emitJSON(&ventry) + if err != nil { + return nil, err + } + case 3: // Third element must be field name + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return nil, err + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + return nil, err + } + vstr, vlist, err := DecodeJsonField(ventry, path[2]) + if err != nil { + return nil, err + } + if vstr != nil { + jv = []byte(`"` + *vstr + `"`) + } else if vlist != nil { + jv, err = json.Marshal(vlist) + if err != nil { + return nil, err + } + } + case 4: // Fourth element must be list index + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return nil, err + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + return nil, err + } + _, vlist, err := DecodeJsonField(ventry, path[2]) + if err != nil { + return nil, err + } + vstr, err := DecodeJsonListItem(vlist, path[3]) + if err != nil { + return nil, err + } + if vstr != nil { + jv = []byte(`"` + *vstr + `"`) + } else { + return nil, fmt.Errorf("Invalid db table Path %v", path) + } + default: + log.V(2).Infof("Invalid db table Path %v", path) + return nil, fmt.Errorf("Invalid db table Path %v", path) + } + return jv, nil +} + +func (c *JsonClient) Add(path []string, value string) error { + // The expect real db path could be in one of the formats: + // <1> DB Table + // <2> DB Table Key + // <3> DB Table Key Field + // <4> DB Table Key Field Index + switch len(path) { + case 1: // only table name provided + vtable, err := parseJson([]byte(value)) + if err != nil { + return fmt.Errorf("Fail to parse %v", value) + } + v, ok := vtable.(map[string]interface{}) + if !ok { + log.V(2).Infof("Invalid table %v", vtable) + return fmt.Errorf("Invalid table %v", vtable) + } + c.jsonData[path[0]] = v + case 2: // Second element must be table key + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + vtable = make(map[string]interface{}) + c.jsonData[path[0]] = vtable + } + ventry, err := parseJson([]byte(value)) + if err != nil { + return fmt.Errorf("Fail to parse %v", value) + } + v, ok := ventry.(map[string]interface{}) + if !ok { + log.V(2).Infof("Invalid entry %v", ventry) + return fmt.Errorf("Invalid entry %v", ventry) + } + vtable[path[1]] = v + case 3: // Third element must be field name + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + vtable = make(map[string]interface{}) + c.jsonData[path[0]] = vtable + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + ventry = make(map[string]interface{}) + vtable[path[1]] = ventry + } + vfield, err := 
parseJson([]byte(value)) + if err != nil { + return fmt.Errorf("Fail to parse %v", value) + } + vstr, ok := vfield.(string) + if ok { + ventry[path[2]] = vstr + return nil + } + vlist, ok := vfield.([]interface{}) + if ok { + ventry[path[2]] = vlist + return nil + } + log.V(2).Infof("Invalid field %v", vfield) + return fmt.Errorf("Invalid field %v", vfield) + case 4: // Fourth element must be list index + id, err := strconv.Atoi(path[3]) + if err != nil { + log.V(2).Infof("Invalid index %v", path[3]) + return fmt.Errorf("Invalid index %v", path[3]) + } + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + vtable = make(map[string]interface{}) + c.jsonData[path[0]] = vtable + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + ventry = make(map[string]interface{}) + vtable[path[1]] = ventry + } + vstr, vlist, err := DecodeJsonField(ventry, path[2]) + if err != nil { + vlist = make([]interface{}, 0) + ventry[path[2]] = vlist + } + if vstr != nil { + log.V(2).Infof("Invalid target field %v", ventry) + return fmt.Errorf("Invalid target field %v", ventry) + } + if id < 0 || id > len(vlist) { + log.V(2).Infof("Invalid index %v", id) + return fmt.Errorf("Invalid index %v", id) + } + if id == len(vlist) { + vlist = append(vlist, "") + ventry[path[2]] = vlist + } + v, err := parseJson([]byte(value)) + if err != nil { + return fmt.Errorf("Fail to parse %v", value) + } + vlist[id] = v + default: + log.V(2).Infof("Invalid db table Path %v", path) + return fmt.Errorf("Invalid db table Path %v", path) + } + + return nil +} + +func (c *JsonClient) Remove(path []string) error { + // The expect real db path could be in one of the formats: + // <1> DB Table + // <2> DB Table Key + // <3> DB Table Key Field + // <4> DB Table Key Field Index + switch len(path) { + case 1: // only table name provided + _, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return err + } + delete(c.jsonData, path[0]) + case 2: // Second element must be table key + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return err + } + _, err = DecodeJsonEntry(vtable, path[1]) + if err != nil { + return err + } + delete(vtable, path[1]) + if len(vtable) == 0 { + delete(c.jsonData, path[0]) + } + case 3: // Third element must be field name + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return err + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + return err + } + _, _, err = DecodeJsonField(ventry, path[2]) + if err != nil { + return err + } + delete(ventry, path[2]) + if len(ventry) == 0 { + delete(vtable, path[1]) + } + if len(vtable) == 0 { + delete(c.jsonData, path[0]) + } + case 4: // Fourth element must be list index + id, err := strconv.Atoi(path[3]) + if err != nil { + log.V(2).Infof("Invalid index %v", path[3]) + return fmt.Errorf("Invalid index %v", path[3]) + } + vtable, err := DecodeJsonTable(c.jsonData, path[0]) + if err != nil { + return err + } + ventry, err := DecodeJsonEntry(vtable, path[1]) + if err != nil { + return err + } + _, vlist, err := DecodeJsonField(ventry, path[2]) + if err != nil { + return err + } + _, err = DecodeJsonListItem(vlist, path[3]) + if err != nil { + return err + } + vlist = append(vlist[:id], vlist[id+1:]...) 
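+		// Write the shortened list back, then prune any parent containers that the removal left empty.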
+ ventry[path[2]] = vlist + if len(vlist) == 0 { + delete(ventry, path[2]) + } + if len(ventry) == 0 { + delete(vtable, path[1]) + } + if len(vtable) == 0 { + delete(c.jsonData, path[0]) + } + default: + log.V(2).Infof("Invalid db table Path %v", path) + return fmt.Errorf("Invalid db table Path %v", path) + } + + return nil +} \ No newline at end of file diff --git a/sonic_data_client/mixed_db_client.go b/sonic_data_client/mixed_db_client.go new file mode 100644 index 00000000..1e0b16c9 --- /dev/null +++ b/sonic_data_client/mixed_db_client.go @@ -0,0 +1,1112 @@ +package client + +// #cgo pkg-config: python3-embed +// #include +import "C" + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unsafe" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + log "github.com/golang/glog" + "github.com/Workiva/go-datastructures/queue" + "github.com/sonic-net/sonic-gnmi/common_utils" + "github.com/sonic-net/sonic-gnmi/swsscommon" + sdcfg "github.com/sonic-net/sonic-gnmi/sonic_db_config" + spb "github.com/sonic-net/sonic-gnmi/proto" + ssc "github.com/sonic-net/sonic-gnmi/sonic_service_client" + gnmipb "github.com/openconfig/gnmi/proto/gnmi" +) + +const REDIS_SOCK string = "/var/run/redis/redis.sock" +const APPL_DB int = 0 +const SWSS_TIMEOUT uint = 0 +const CHECK_POINT_PATH string = "/etc/sonic" + +const ( + opAdd = iota + opRemove +) + +var ( + supportedModels = []gnmipb.ModelData{ + { + Name: "sonic-yang", + Organization: "SONiC", + Version: "0.1.0", + }, + { + Name: "sonic-db", + Organization: "SONiC", + Version: "0.1.0", + }, + } +) + +type MixedDbClient struct { + prefix *gnmipb.Path + paths []*gnmipb.Path + pathG2S map[*gnmipb.Path][]tablePath + q *queue.PriorityQueue + channel chan struct{} + target string + origin string + workPath string + jClient *JsonClient + applDB swsscommon.DBConnector + tableMap map[string]swsscommon.ProducerStateTable + + synced sync.WaitGroup // Control when to send gNMI sync_response + w *sync.WaitGroup // wait for all sub go routines to finish + mu sync.RWMutex // Mutex for data protection among routines for DbClient +} + +func parseJson(str []byte) (interface{}, error) { + var res interface{} + err := json.Unmarshal(str, &res) + if err != nil { + return res, fmt.Errorf("JSON unmarshalling error: %v", err) + } + return res, nil +} + +func ParseTarget(target string, paths []*gnmipb.Path) (string, error) { + if len(paths) == 0 { + return "", nil + } + for i, path := range paths { + elems := path.GetElem() + if elems == nil { + return "", status.Error(codes.Unimplemented, "No target specified in path") + } + if target == "" { + if i == 0 { + target = elems[0].GetName() + } + } else if target != elems[0].GetName() { + return "", status.Error(codes.Unimplemented, "Target conflict in path") + } + } + if target == "" { + return "", status.Error(codes.Unimplemented, "No target specified in path") + } + return target, nil +} + +func ParseOrigin(origin string, paths []*gnmipb.Path) (string, error) { + if len(paths) == 0 { + return origin, nil + } + for i, path := range paths { + if origin == "" { + if i == 0 { + origin = path.Origin + } + } else if origin != path.Origin { + return "", status.Error(codes.Unimplemented, "Origin conflict in path") + } + } + if origin == "" { + return origin, status.Error(codes.Unimplemented, "No origin specified in path") + } + return origin, nil +} + +func IsSupportedOrigin(origin string) bool { + for _, model := range supportedModels { + if model.Name == 
origin { + return true + } + } + return false +} + +func (c *MixedDbClient) DbSetTable(table string, key string, values map[string]string) error { + pt, ok := c.tableMap[table] + if !ok { + pt = swsscommon.NewProducerStateTable(c.applDB, table) + c.tableMap[table] = pt + } + pt.Set(key, values, "SET", "") + return nil +} + +func (c *MixedDbClient) DbDelTable(table string, key string) error { + pt, ok := c.tableMap[table] + if !ok { + pt = swsscommon.NewProducerStateTable(c.applDB, table) + c.tableMap[table] = pt + } + pt.Del(key, "DEL", "") + return nil +} + +func NewMixedDbClient(paths []*gnmipb.Path, prefix *gnmipb.Path) (Client, error) { + var client MixedDbClient + var err error + + // Testing program may ask to use redis local tcp connection + if UseRedisLocalTcpPort { + useRedisTcpClient() + } + + client.prefix = prefix + client.target = "" + client.origin = "" + if prefix != nil { + elems := prefix.GetElem() + if elems != nil { + client.target = elems[0].GetName() + } + client.origin = prefix.Origin + } + if paths == nil { + return &client, nil + } + + if client.target == "" { + client.target, err = ParseTarget(client.target, paths) + if err != nil { + return nil, err + } + } + if client.origin == "" { + client.origin, err = ParseOrigin(client.origin, paths) + if err != nil { + return nil, err + } + } + if check := IsSupportedOrigin(client.origin); !check { + return nil, status.Errorf(codes.Unimplemented, "Invalid origin: %s", client.origin) + } + if client.origin == "sonic-yang" { + return nil, status.Errorf(codes.Unimplemented, "SONiC Yang Schema is not implemented yet") + } + _, ok, _, _ := IsTargetDb(client.target); + if !ok { + return nil, status.Errorf(codes.Unimplemented, "Invalid target: %s", client.target) + } + client.paths = paths + client.workPath = common_utils.GNMI_WORK_PATH + client.applDB = swsscommon.NewDBConnector2(APPL_DB, REDIS_SOCK, SWSS_TIMEOUT) + client.tableMap = map[string]swsscommon.ProducerStateTable{} + + return &client, nil +} + +// gnmiFullPath builds the full path from the prefix and path. +func (c *MixedDbClient) gnmiFullPath(prefix, path *gnmipb.Path) *gnmipb.Path { + + fullPath := &gnmipb.Path{Origin: path.Origin} + if path.GetElement() != nil { + elements := path.GetElement() + if prefix != nil { + elements = append(prefix.GetElement(), elements...) + } + // Skip first elem + fullPath.Element = elements[1:] + } + if path.GetElem() != nil { + elems := path.GetElem() + if prefix != nil { + elems = append(prefix.GetElem(), elems...) 
+ } + // Skip first elem + fullPath.Elem = elems[1:] + } + return fullPath +} + +func (c *MixedDbClient) populateAllDbtablePath(paths []*gnmipb.Path, pathG2S *map[*gnmipb.Path][]tablePath) error { + for _, path := range paths { + err := c.populateDbtablePath(path, nil, pathG2S) + if err != nil { + return err + } + } + return nil +} + +// Populate table path in DB from gnmi path +func (c *MixedDbClient) populateDbtablePath(path *gnmipb.Path, value *gnmipb.TypedValue, pathG2S *map[*gnmipb.Path][]tablePath) error { + var buffer bytes.Buffer + var dbPath string + var tblPath tablePath + + targetDbName, targetDbNameValid, targetDbNameSpace, _ := IsTargetDb(c.target) + // Verify it is a valid db name + if !targetDbNameValid { + return fmt.Errorf("Invalid target dbName %v", targetDbName) + } + + // Verify Namespace is valid + dbNamespace, ok := sdcfg.GetDbNamespaceFromTarget(targetDbNameSpace) + if !ok { + return fmt.Errorf("Invalid target dbNameSpace %v", targetDbNameSpace) + } + + fullPath := path + if c.prefix != nil { + fullPath = c.gnmiFullPath(c.prefix, path) + } + + stringSlice := []string{targetDbName} + separator, _ := GetTableKeySeparator(targetDbName, dbNamespace) + elems := fullPath.GetElem() + if elems != nil { + for i, elem := range elems { + // TODO: Usage of key field + log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) + if i != 0 { + buffer.WriteString(separator) + } + buffer.WriteString(elem.GetName()) + stringSlice = append(stringSlice, elem.GetName()) + } + dbPath = buffer.String() + } + value_str := "" + if value != nil { + value_str = string(value.GetJsonIetfVal()) + } + + tblPath.dbNamespace = dbNamespace + tblPath.dbName = targetDbName + tblPath.tableName = stringSlice[1] + tblPath.delimitor = separator + tblPath.operation = opRemove + tblPath.index = -1 + if value != nil { + tblPath.operation = opAdd + tblPath.value = value_str + } + + var mappedKey string + if len(stringSlice) > 2 { // tmp, to remove mappedKey + mappedKey = stringSlice[2] + } + + redisDb, ok := Target2RedisDb[tblPath.dbNamespace][tblPath.dbName] + if !ok { + return fmt.Errorf("Redis Client not present for dbName %v dbNamespace %v", targetDbName, dbNamespace) + } + + // The expect real db path could be in one of the formats: + // <1> DB Table + // <2> DB Table Key + // <3> DB Table Field + // <4> DB Table Key Field + // <5> DB Table Key Field Index + switch len(stringSlice) { + case 2: // only table name provided + if tblPath.operation == opRemove { + res, err := redisDb.Keys(tblPath.tableName + "*").Result() + if err != nil || len(res) < 1 { + log.V(2).Infof("Invalid db table Path %v %v", c.target, dbPath) + return fmt.Errorf("Failed to find %v %v %v %v", c.target, dbPath, err, res) + } + } + tblPath.tableKey = "" + case 3: // Third element must be table key + if tblPath.operation == opRemove { + _, err := redisDb.Exists(tblPath.tableName + tblPath.delimitor + mappedKey).Result() + if err != nil { + return fmt.Errorf("redis Exists op failed for %v", dbPath) + } + } + tblPath.tableKey = mappedKey + case 4: // Fourth element must be field name + if tblPath.operation == opRemove { + _, err := redisDb.Exists(tblPath.tableName + tblPath.delimitor + mappedKey).Result() + if err != nil { + return fmt.Errorf("redis Exists op failed for %v", dbPath) + } + } + tblPath.tableKey = mappedKey + tblPath.field = stringSlice[3] + case 5: // Fifth element must be list index + if tblPath.operation == opRemove { + _, err := redisDb.Exists(tblPath.tableName + tblPath.delimitor + mappedKey).Result() 
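+			// Only a Redis error aborts the probe; the list index itself is parsed and validated below.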
+ if err != nil { + return fmt.Errorf("redis Exists op failed for %v", dbPath) + } + } + tblPath.tableKey = mappedKey + tblPath.field = stringSlice[3] + index, err := strconv.Atoi(stringSlice[4]) + if err != nil { + return fmt.Errorf("Invalid index %v", stringSlice[4]) + } + tblPath.index = index + default: + log.V(2).Infof("Invalid db table Path %v", dbPath) + return fmt.Errorf("Invalid db table Path %v", dbPath) + } + + (*pathG2S)[path] = []tablePath{tblPath} + log.V(5).Infof("tablePath %+v", tblPath) + return nil +} + +// makeJSON renders the database Key op value_pairs to map[string]interface{} for JSON marshall. +func (c *MixedDbClient) makeJSON_redis(msi *map[string]interface{}, key *string, op *string, mfv map[string]string) error { + // TODO: Use Yang model to identify leaf-list + if key == nil && op == nil { + for f, v := range mfv { + if strings.HasSuffix(f, "@") { + k := strings.TrimSuffix(f, "@") + slice := strings.Split(v, ",") + (*msi)[k] = slice + } else { + (*msi)[f] = v + } + } + return nil + } + + fp := map[string]interface{}{} + for f, v := range mfv { + if strings.HasSuffix(f, "@") { + k := strings.TrimSuffix(f, "@") + slice := strings.Split(v, ",") + fp[k] = slice + } else { + fp[f] = v + } + } + + if key == nil { + (*msi)[*op] = fp + } else if op == nil { + (*msi)[*key] = fp + } else { + // Also have operation layer + of := map[string]interface{}{} + + of[*op] = fp + (*msi)[*key] = of + } + return nil +} + +// tableData2Msi renders the redis DB data to map[string]interface{} +// which may be marshaled to JSON format +// If only table name provided in the tablePath, find all keys in the table, otherwise +// Use tableName + tableKey as key to get all field value paires +func (c *MixedDbClient) tableData2Msi(tblPath *tablePath, useKey bool, op *string, msi *map[string]interface{}) error { + redisDb := Target2RedisDb[tblPath.dbNamespace][tblPath.dbName] + + var pattern string + var dbkeys []string + var err error + var fv map[string]string + + //Only table name provided + if tblPath.tableKey == "" { + // tables in COUNTERS_DB other than COUNTERS table doesn't have keys + if tblPath.dbName == "COUNTERS_DB" && tblPath.tableName != "COUNTERS" { + pattern = tblPath.tableName + } else { + pattern = tblPath.tableName + tblPath.delimitor + "*" + } + dbkeys, err = redisDb.Keys(pattern).Result() + if err != nil { + log.V(2).Infof("redis Keys failed for %v, pattern %s", tblPath, pattern) + return fmt.Errorf("redis Keys failed for %v, pattern %s %v", tblPath, pattern, err) + } + } else { + // both table name and key provided + dbkeys = []string{tblPath.tableName + tblPath.delimitor + tblPath.tableKey} + } + + for idx, dbkey := range dbkeys { + fv, err = redisDb.HGetAll(dbkey).Result() + if err != nil { + log.V(2).Infof("redis HGetAll failed for %v, dbkey %s", tblPath, dbkey) + return err + } + + if tblPath.jsonTableKey != "" { // If jsonTableKey was prepared, use it + err = c.makeJSON_redis(msi, &tblPath.jsonTableKey, op, fv) + } else if (tblPath.tableKey != "" && !useKey) || tblPath.tableName == dbkey { + err = c.makeJSON_redis(msi, nil, op, fv) + } else { + var key string + // Split dbkey string into two parts and second part is key in table + keys := strings.SplitN(dbkey, tblPath.delimitor, 2) + key = keys[1] + err = c.makeJSON_redis(msi, &key, op, fv) + } + if err != nil { + log.V(2).Infof("makeJSON err %s for fv %v", err, fv) + return err + } + log.V(6).Infof("Added idex %v fv %v ", idx, fv) + } + return nil +} + +func (c *MixedDbClient) tableData2TypedValue(tblPaths 
[]tablePath, op *string) (*gnmipb.TypedValue, error) { + var useKey bool + msi := make(map[string]interface{}) + for _, tblPath := range tblPaths { + redisDb := Target2RedisDb[tblPath.dbNamespace][tblPath.dbName] + + if tblPath.jsonField == "" { // Not asked to include field in json value, which means not wildcard query + // table path includes table, key and field + if tblPath.field != "" { + if len(tblPaths) != 1 { + log.V(2).Infof("WARNING: more than one path exists for field granularity query: %v", tblPaths) + } + var key string + if tblPath.tableKey != "" { + key = tblPath.tableName + tblPath.delimitor + tblPath.tableKey + } else { + key = tblPath.tableName + } + + // TODO: Use Yang model to identify leaf-list + if tblPath.index >= 0 { + field := tblPath.field + "@" + val, err := redisDb.HGet(key, field).Result() + if err != nil { + log.V(2).Infof("redis HGet failed for %v", tblPath) + return nil, err + } + slice := strings.Split(val, ",") + if tblPath.index >= len(slice) { + return nil, fmt.Errorf("Invalid index %v for %v", tblPath.index, slice) + } + return &gnmipb.TypedValue{ + Value: &gnmipb.TypedValue_JsonIetfVal{ + JsonIetfVal: []byte(`"` + slice[tblPath.index] + `"`), + }}, nil + } else { + field := tblPath.field + val, err := redisDb.HGet(key, field).Result() + if err == nil { + return &gnmipb.TypedValue{ + Value: &gnmipb.TypedValue_JsonIetfVal{ + JsonIetfVal: []byte(`"` + val + `"`), + }}, nil + } + field = field + "@" + val, err = redisDb.HGet(key, field).Result() + if err == nil { + var output []byte + slice := strings.Split(val, ",") + output, err = json.Marshal(slice) + if err != nil { + return nil, err + } + return &gnmipb.TypedValue{ + Value: &gnmipb.TypedValue_JsonIetfVal{ + JsonIetfVal: []byte(output), + }}, nil + } + log.V(2).Infof("redis HGet failed for %v", tblPath) + return nil, err + } + } + } + + err := c.tableData2Msi(&tblPath, useKey, nil, &msi) + if err != nil { + return nil, err + } + } + return msi2TypedValue(msi) +} + +func ConvertDbEntry(inputData map[string]interface{}) map[string]string { + outputData := map[string]string{} + for key, value := range inputData { + switch value.(type) { + case string: + outputData[key] = value.(string) + case []interface{}: + list := value.([]interface{}) + key_redis := key + "@" + slice := []string{} + for _, item := range(list) { + if str, check := item.(string); check { + slice = append(slice, str) + } else { + continue + } + } + str_val := strings.Join(slice, ",") + outputData[key_redis] = str_val + } + } + return outputData +} + +func (c *MixedDbClient) handleTableData(tblPaths []tablePath) error { + var pattern string + var dbkeys []string + var err error + var res interface{} + + for _, tblPath := range tblPaths { + log.V(5).Infof("handleTableData: tblPath %v", tblPath) + redisDb := Target2RedisDb[tblPath.dbNamespace][tblPath.dbName] + + if tblPath.jsonField == "" { // Not asked to include field in json value, which means not wildcard query + // table path includes table, key and field + if tblPath.field != "" { + if len(tblPaths) != 1 { + log.V(2).Infof("WARNING: more than one path exists for field granularity query: %v", tblPaths) + } + return fmt.Errorf("Unsupported path %v, can't update field", tblPath) + } + } + + if tblPath.operation == opRemove { + //Only table name provided + if tblPath.tableKey == "" { + // tables in COUNTERS_DB other than COUNTERS table doesn't have keys + if tblPath.dbName == "COUNTERS_DB" && tblPath.tableName != "COUNTERS" { + pattern = tblPath.tableName + } else { + pattern = 
tblPath.tableName + tblPath.delimitor + "*" + } + // Can't remove entry in temporary state table + dbkeys, err = redisDb.Keys(pattern).Result() + if err != nil { + log.V(2).Infof("redis Keys failed for %v, pattern %s", tblPath, pattern) + return fmt.Errorf("redis Keys failed for %v, pattern %s %v", tblPath, pattern, err) + } + } else { + // both table name and key provided + dbkeys = []string{tblPath.tableName + tblPath.delimitor + tblPath.tableKey} + } + + for _, dbkey := range dbkeys { + tableKey := strings.TrimPrefix(dbkey, tblPath.tableName + tblPath.delimitor) + err = c.DbDelTable(tblPath.tableName, tableKey) + if err != nil { + log.V(2).Infof("swsscommon delete failed for %v, dbkey %s", tblPath, dbkey) + return err + } + } + } else if tblPath.operation == opAdd { + if tblPath.tableKey != "" { + // both table name and key provided + res, err = parseJson([]byte(tblPath.value)) + if err != nil { + return err + } + if vtable, ok := res.(map[string]interface{}); ok { + configMap := make(map[string]interface{}) + tableMap := make(map[string]interface{}) + tableMap[tblPath.tableKey] = vtable + configMap[tblPath.tableName] = tableMap + ietf_json_val, err := emitJSON(&configMap) + if err != nil { + return fmt.Errorf("Translate to json failed!") + } + PyCodeInGo := fmt.Sprintf(PyCodeForYang, ietf_json_val) + err = RunPyCode(PyCodeInGo) + if err != nil { + return fmt.Errorf("Yang validation failed!") + } + outputData := ConvertDbEntry(vtable) + c.DbDelTable(tblPath.tableName, tblPath.tableKey) + err = c.DbSetTable(tblPath.tableName, tblPath.tableKey, outputData) + if err != nil { + log.V(2).Infof("swsscommon update failed for %v, value %v", tblPath, outputData) + return err + } + } else { + return fmt.Errorf("Key %v: Unsupported value %v type %v", tblPath.tableKey, res, reflect.TypeOf(res)) + } + } else { + res, err = parseJson([]byte(tblPath.value)) + if err != nil { + return err + } + if vtable, ok := res.(map[string]interface{}); ok { + configMap := make(map[string]interface{}) + configMap[tblPath.tableName] = vtable + ietf_json_val, err := emitJSON(&configMap) + if err != nil { + return fmt.Errorf("Translate to json failed!") + } + PyCodeInGo := fmt.Sprintf(PyCodeForYang, ietf_json_val) + err = RunPyCode(PyCodeInGo) + if err != nil { + return fmt.Errorf("Yang validation failed!") + } + for tableKey, tres := range vtable { + if vt, ret := tres.(map[string]interface{}); ret { + outputData := ConvertDbEntry(vt) + c.DbDelTable(tblPath.tableName, tableKey) + err = c.DbSetTable(tblPath.tableName, tableKey, outputData) + if err != nil { + log.V(2).Infof("swsscommon update failed for %v, value %v", tblPath, outputData) + return err + } + } else { + return fmt.Errorf("Key %v: Unsupported value %v type %v", tableKey, tres, reflect.TypeOf(tres)) + } + } + } else { + return fmt.Errorf("Unsupported value %v type %v", res, reflect.TypeOf(res)) + } + } + } else { + return fmt.Errorf("Unsupported operation %v", tblPath.operation) + } + + } + return nil +} + +/* Populate the JsonPatch corresponding each GNMI operation. 
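+   A delete maps to {"op": "remove", "path": ...}; an update or replace maps to
+   {"op": "add", "path": ..., "value": <IETF JSON>}. Path elements are joined with
+   "/" and element keys are rendered as [key=value].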
*/ +func (c *MixedDbClient) ConvertToJsonPatch(prefix *gnmipb.Path, path *gnmipb.Path, t *gnmipb.TypedValue, output *string) error { + if t != nil { + if len(t.GetJsonIetfVal()) == 0 { + return fmt.Errorf("Value encoding is not IETF JSON") + } + } + fullPath := path + if prefix != nil { + fullPath = c.gnmiFullPath(prefix, path) + } + + elems := fullPath.GetElem() + if t == nil { + *output = `{"op": "remove", "path": "/` + } else { + *output = `{"op": "add", "path": "/` + } + + if elems != nil { + /* Iterate through elements. */ + for _, elem := range elems { + *output += elem.GetName() + key := elem.GetKey() + /* If no keys are present end the element with "/" */ + if key == nil { + *output += `/` + } + + /* If keys are present , process the keys. */ + if key != nil { + for k, v := range key { + *output += `[` + k + `=` + v + `]` + } + + /* Append "/" after all keys are processed. */ + *output += `/` + } + } + } + + /* Trim the "/" at the end which is not required. */ + *output = strings.TrimSuffix(*output, `/`) + if t == nil { + *output += `"}` + } else { + str := string(t.GetJsonIetfVal()) + val := strings.Replace(str, "\n", "", -1) + *output += `", "value": ` + val + `}` + } + return nil +} + +func RunPyCode(text string) error { + defer C.Py_Finalize() + C.Py_Initialize() + PyCodeInC := C.CString(text) + defer C.free(unsafe.Pointer(PyCodeInC)) + CRet := C.PyRun_SimpleString(PyCodeInC) + if int(CRet) != 0 { + return fmt.Errorf("Python failure") + } + return nil +} + +var PyCodeForYang string = +` +import sonic_yang +import json + +yang_parser = sonic_yang.SonicYang("/usr/local/yang-models") +yang_parser.loadYangModel() +text = '''%s''' + +try: + yang_parser.loadData(configdbJson=json.loads(text)) + yang_parser.validate_data_tree() +except sonic_yang.SonicYangException as e: + print("Yang validation error: {}".format(str(e))) + raise +` + +func (c *MixedDbClient) SetIncrementalConfig(delete []*gnmipb.Path, replace []*gnmipb.Update, update []*gnmipb.Update) error { + var err error + var curr string + + var sc ssc.Service + sc, err = ssc.NewDbusClient() + if err != nil { + return err + } + err = sc.CreateCheckPoint(CHECK_POINT_PATH + "/config") + if err != nil { + return err + } + defer sc.DeleteCheckPoint(CHECK_POINT_PATH + "/config") + fileName := CHECK_POINT_PATH + "/config.cp.json" + c.jClient, err = NewJsonClient(fileName) + if err != nil { + return err + } + + text := `[` + /* DELETE */ + for _, path := range delete { + fullPath := path + if c.prefix != nil { + fullPath = c.gnmiFullPath(c.prefix, path) + } + log.V(2).Infof("Path #%v", fullPath) + + stringSlice := []string{} + elems := fullPath.GetElem() + if elems != nil { + for i, elem := range elems { + // TODO: Usage of key field + log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) + stringSlice = append(stringSlice, elem.GetName()) + } + err := c.jClient.Remove(stringSlice) + if err != nil { + // Remove failed, ignore + continue + } + } + curr = `` + err = c.ConvertToJsonPatch(c.prefix, path, nil, &curr) + if err != nil { + return err + } + text += curr + `,` + } + + /* REPLACE */ + for _, path := range replace { + fullPath := path.GetPath() + if c.prefix != nil { + fullPath = c.gnmiFullPath(c.prefix, path.GetPath()) + } + log.V(2).Infof("Path #%v", fullPath) + + stringSlice := []string{} + elems := fullPath.GetElem() + if elems != nil { + for i, elem := range elems { + // TODO: Usage of key field + log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) + stringSlice = 
append(stringSlice, elem.GetName()) + } + t := path.GetVal() + if t == nil { + err := c.jClient.Remove(stringSlice) + if err != nil { + // Remove failed, ignore + continue + } + } else { + err := c.jClient.Add(stringSlice, string(t.GetJsonIetfVal())) + if err != nil { + // Add failed + return err + } + } + } + curr = `` + err = c.ConvertToJsonPatch(c.prefix, path.GetPath(), path.GetVal(), &curr) + if err != nil { + return err + } + text += curr + `,` + } + + /* UPDATE */ + for _, path := range update { + fullPath := path.GetPath() + if c.prefix != nil { + fullPath = c.gnmiFullPath(c.prefix, path.GetPath()) + } + log.V(2).Infof("Path #%v", fullPath) + + stringSlice := []string{} + elems := fullPath.GetElem() + if elems != nil { + for i, elem := range elems { + // TODO: Usage of key field + log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) + stringSlice = append(stringSlice, elem.GetName()) + } + t := path.GetVal() + if t == nil { + return fmt.Errorf("Invalid update %v", path) + } else { + err := c.jClient.Add(stringSlice, string(t.GetJsonIetfVal())) + if err != nil { + // Add failed + return err + } + } + } + curr = `` + err = c.ConvertToJsonPatch(c.prefix, path.GetPath(), path.GetVal(), &curr) + if err != nil { + return err + } + text += curr + `,` + } + text = strings.TrimSuffix(text, `,`) + text += `]` + log.V(2).Infof("JsonPatch: %s", text) + if text == `[]` { + // No need to apply patch + return nil + } + patchFile := c.workPath + "/gcu.patch" + err = ioutil.WriteFile(patchFile, []byte(text), 0644) + if err != nil { + return err + } + + if c.origin == "sonic-db" { + err = sc.ApplyPatchDb(text) + } + + if err == nil { + err = sc.ConfigSave("/etc/sonic/config_db.json") + } + return err +} + +func (c *MixedDbClient) SetFullConfig(delete []*gnmipb.Path, replace []*gnmipb.Update, update []*gnmipb.Update) error { + val := update[0].GetVal() + ietf_json_val := val.GetJsonIetfVal() + if len(ietf_json_val) == 0 { + return fmt.Errorf("Value encoding is not IETF JSON") + } + content := []byte(ietf_json_val) + fileName := c.workPath + "/config_db.json.tmp" + err := ioutil.WriteFile(fileName, content, 0644) + if err != nil { + return err + } + + PyCodeInGo := fmt.Sprintf(PyCodeForYang, ietf_json_val) + err = RunPyCode(PyCodeInGo) + if err != nil { + return fmt.Errorf("Yang validation failed!") + } + + return nil +} + +func (c *MixedDbClient) SetDB(delete []*gnmipb.Path, replace []*gnmipb.Update, update []*gnmipb.Update) error { + /* DELETE */ + deleteMap := make(map[*gnmipb.Path][]tablePath) + err := c.populateAllDbtablePath(delete, &deleteMap) + if err != nil { + return err + } + + for _, tblPaths := range deleteMap { + err = c.handleTableData(tblPaths) + if err != nil { + return err + } + } + + /* REPLACE */ + replaceMap := make(map[*gnmipb.Path][]tablePath) + for _, item := range replace { + err = c.populateDbtablePath(item.GetPath(), item.GetVal(), &replaceMap) + if err != nil { + return err + } + } + for _, tblPaths := range replaceMap { + err = c.handleTableData(tblPaths) + if err != nil { + return err + } + } + + /* UPDATE */ + updateMap := make(map[*gnmipb.Path][]tablePath) + for _, item := range update { + err = c.populateDbtablePath(item.GetPath(), item.GetVal(), &updateMap) + if err != nil { + return err + } + } + for _, tblPaths := range updateMap { + err = c.handleTableData(tblPaths) + if err != nil { + return err + } + } + return nil +} + +func (c *MixedDbClient) SetConfigDB(delete []*gnmipb.Path, replace []*gnmipb.Update, update []*gnmipb.Update) error { + 
// Full configuration will be overwritten next set request + fileName := c.workPath + "/config_db.json.tmp" + os.Remove(fileName) + + deleteLen := len(delete) + replaceLen := len(replace) + updateLen := len(update) + if (deleteLen == 1 && replaceLen == 0 && updateLen == 1) { + deletePath := c.gnmiFullPath(c.prefix, delete[0]) + updatePath := c.gnmiFullPath(c.prefix, update[0].GetPath()) + if (len(deletePath.GetElem()) == 0) && (len(updatePath.GetElem()) == 0) { + return c.SetFullConfig(delete, replace, update) + } + } + return c.SetIncrementalConfig(delete, replace, update) +} + +func (c *MixedDbClient) Set(delete []*gnmipb.Path, replace []*gnmipb.Update, update []*gnmipb.Update) error { + if c.target == "CONFIG_DB" { + return c.SetConfigDB(delete, replace, update) + } else if c.target == "APPL_DB" { + return c.SetDB(delete, replace, update) + } + return fmt.Errorf("Set RPC does not support %v", c.target) +} + +func (c *MixedDbClient) GetCheckPoint() ([]*spb.Value, error) { + var values []*spb.Value + var err error + ts := time.Now() + + fileName := CHECK_POINT_PATH + "/config.cp.json" + c.jClient, err = NewJsonClient(fileName) + if err != nil { + return nil, fmt.Errorf("There's no check point") + } + log.V(2).Infof("Getting #%v", c.jClient.jsonData) + for _, path := range c.paths { + fullPath := path + if c.prefix != nil { + fullPath = c.gnmiFullPath(c.prefix, path) + } + log.V(2).Infof("Path #%v", fullPath) + + stringSlice := []string{} + elems := fullPath.GetElem() + if elems != nil { + for i, elem := range elems { + // TODO: Usage of key field + log.V(6).Infof("index %d elem : %#v %#v", i, elem.GetName(), elem.GetKey()) + stringSlice = append(stringSlice, elem.GetName()) + } + jv, err := c.jClient.Get(stringSlice) + if err != nil { + return nil, err + } + + val := gnmipb.TypedValue{ + Value: &gnmipb.TypedValue_JsonIetfVal{JsonIetfVal: jv}, + } + values = append(values, &spb.Value{ + Prefix: c.prefix, + Path: path, + Timestamp: ts.UnixNano(), + Val: &val, + }) + } + } + + return values, nil +} + +func (c *MixedDbClient) Get(w *sync.WaitGroup) ([]*spb.Value, error) { + if c.target == "CONFIG_DB" { + ret, err := c.GetCheckPoint() + if err == nil { + return ret, err + } + log.V(6).Infof("Error #%v", err) + } + + if c.paths != nil { + c.pathG2S = make(map[*gnmipb.Path][]tablePath) + err := c.populateAllDbtablePath(c.paths, &c.pathG2S) + if err != nil { + return nil, err + } + } + + var values []*spb.Value + ts := time.Now() + for gnmiPath, tblPaths := range c.pathG2S { + val, err := c.tableData2TypedValue(tblPaths, nil) + if err != nil { + return nil, err + } + + values = append(values, &spb.Value{ + Prefix: c.prefix, + Path: gnmiPath, + Timestamp: ts.UnixNano(), + Val: val, + }) + } + log.V(6).Infof("Getting #%v", values) + log.V(4).Infof("Get done, total time taken: %v ms", int64(time.Since(ts)/time.Millisecond)) + return values, nil +} + +func (c *MixedDbClient) OnceRun(q *queue.PriorityQueue, once chan struct{}, w *sync.WaitGroup, subscribe *gnmipb.SubscriptionList) { + return +} + +func (c *MixedDbClient) PollRun(q *queue.PriorityQueue, poll chan struct{}, w *sync.WaitGroup, subscribe *gnmipb.SubscriptionList) { + return +} + +func (c *MixedDbClient) StreamRun(q *queue.PriorityQueue, stop chan struct{}, w *sync.WaitGroup, subscribe *gnmipb.SubscriptionList) { + return +} + +func (c *MixedDbClient) Capabilities() []gnmipb.ModelData { + return supportedModels +} + +func (c *MixedDbClient) Close() error { + for _, pt := range c.tableMap { + pt.Delete() + } + return nil +} + +func (c 
*MixedDbClient) SentOne(val *Value) { +} + +func (c *MixedDbClient) FailedSend() { +} + diff --git a/sonic_service_client/dbus_client.go b/sonic_service_client/dbus_client.go new file mode 100644 index 00000000..57b0358b --- /dev/null +++ b/sonic_service_client/dbus_client.go @@ -0,0 +1,149 @@ +package host_service + +import ( + "time" + "fmt" + "reflect" + log "github.com/golang/glog" + "github.com/godbus/dbus/v5" + "github.com/sonic-net/sonic-gnmi/common_utils" +) + +type Service interface { + ConfigReload(fileName string) error + ConfigSave(fileName string) error + ApplyPatchYang(fileName string) error + ApplyPatchDb(fileName string) error + CreateCheckPoint(cpName string) error + DeleteCheckPoint(cpName string) error +} + +type DbusClient struct { + busNamePrefix string + busPathPrefix string + intNamePrefix string + channel chan struct{} +} + +func NewDbusClient() (Service, error) { + var client DbusClient + var err error + + client.busNamePrefix = "org.SONiC.HostService." + client.busPathPrefix = "/org/SONiC/HostService/" + client.intNamePrefix = "org.SONiC.HostService." + err = nil + + return &client, err +} + +func DbusApi(busName string, busPath string, intName string, timeout int, args ...interface{}) error { + common_utils.IncCounter("DBUS") + conn, err := dbus.SystemBus() + if err != nil { + log.V(2).Infof("Failed to connect to system bus: %v", err) + common_utils.IncCounter("DBUS fail") + return err + } + + ch := make(chan *dbus.Call, 1) + obj := conn.Object(busName, dbus.ObjectPath(busPath)) + obj.Go(intName, 0, ch, args...) + select { + case call := <-ch: + if call.Err != nil { + common_utils.IncCounter("DBUS fail") + return call.Err + } + result := call.Body + if len(result) == 0 { + common_utils.IncCounter("DBUS fail") + return fmt.Errorf("Dbus result is empty %v", result) + } + if ret, ok := result[0].(int32); ok { + if ret == 0 { + return nil + } else { + if len(result) != 2 { + common_utils.IncCounter("DBUS fail") + return fmt.Errorf("Dbus result is invalid %v", result) + } + if msg, check := result[1].(string); check { + common_utils.IncCounter("DBUS fail") + return fmt.Errorf(msg) + } else { + common_utils.IncCounter("DBUS fail") + return fmt.Errorf("Invalid result message type %v %v", result[1], reflect.TypeOf(result[1])) + } + } + } else { + common_utils.IncCounter("DBUS fail") + return fmt.Errorf("Invalid result type %v %v", result[0], reflect.TypeOf(result[0])) + } + case <-time.After(time.Duration(timeout) * time.Second): + log.V(2).Infof("DbusApi: timeout") + common_utils.IncCounter("DBUS fail") + return fmt.Errorf("Timeout %v", timeout) + } + return nil +} + +func (c *DbusClient) ConfigReload(config string) error { + common_utils.IncCounter("DBUS config reload") + modName := "config" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + ".reload" + err := DbusApi(busName, busPath, intName, 10, config) + return err +} + +func (c *DbusClient) ConfigSave(fileName string) error { + common_utils.IncCounter("DBUS config save") + modName := "config" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + ".save" + err := DbusApi(busName, busPath, intName, 10, fileName) + return err +} + +func (c *DbusClient) ApplyPatchYang(patch string) error { + common_utils.IncCounter("DBUS apply patch yang") + modName := "gcu" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + 
".apply_patch_yang" + err := DbusApi(busName, busPath, intName, 10, patch) + return err +} + +func (c *DbusClient) ApplyPatchDb(patch string) error { + common_utils.IncCounter("DBUS apply patch db") + modName := "gcu" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + ".apply_patch_db" + err := DbusApi(busName, busPath, intName, 10, patch) + return err +} + +func (c *DbusClient) CreateCheckPoint(fileName string) error { + common_utils.IncCounter("DBUS create checkpoint") + modName := "gcu" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + ".create_checkpoint" + err := DbusApi(busName, busPath, intName, 10, fileName) + return err +} + +func (c *DbusClient) DeleteCheckPoint(fileName string) error { + common_utils.IncCounter("DBUS delete checkpoint") + modName := "gcu" + busName := c.busNamePrefix + modName + busPath := c.busPathPrefix + modName + intName := c.intNamePrefix + modName + ".delete_checkpoint" + err := DbusApi(busName, busPath, intName, 10, fileName) + return err +} diff --git a/sonic_service_client/dbus_client_test.go b/sonic_service_client/dbus_client_test.go new file mode 100644 index 00000000..aced4123 --- /dev/null +++ b/sonic_service_client/dbus_client_test.go @@ -0,0 +1,409 @@ +package host_service + +import ( + "testing" + "reflect" + + "github.com/agiledragon/gomonkey/v2" + "github.com/godbus/dbus/v5" +) + +func TestSystemBusNegative(t *testing.T) { + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigReload("abc") + if err == nil { + t.Errorf("SystemBus should fail") + } +} + +func TestConfigReload(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.config.reload" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigReload("abc") + if err != nil { + t.Errorf("ConfigReload should pass: %v", err) + } +} + +func TestConfigReloadNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.config.reload" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigReload("abc") + if err == nil { + t.Errorf("ConfigReload should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} + +func TestConfigReloadTimeout(t 
*testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.config.reload" { + t.Errorf("Wrong method: %v", method) + } + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigReload("abc") + if err == nil { + t.Errorf("ConfigReload should timeout: %v", err) + } +} + +func TestConfigSave(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.config.save" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigSave("abc") + if err != nil { + t.Errorf("ConfigSave should pass: %v", err) + } +} + +func TestConfigSaveNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.config.save" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ConfigSave("abc") + if err == nil { + t.Errorf("ConfigSave should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} + +func TestApplyPatchYang(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.apply_patch_yang" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ApplyPatchYang("abc") + if err != nil { + t.Errorf("ApplyPatchYang should pass: %v", err) + } +} + +func TestApplyPatchYangNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := 
gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.apply_patch_yang" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ApplyPatchYang("abc") + if err == nil { + t.Errorf("ApplyPatchYang should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} + +func TestApplyPatchDb(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.apply_patch_db" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ApplyPatchDb("abc") + if err != nil { + t.Errorf("ApplyPatchDb should pass: %v", err) + } +} + +func TestApplyPatchDbNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.apply_patch_db" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.ApplyPatchDb("abc") + if err == nil { + t.Errorf("ApplyPatchDb should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} + +func TestCreateCheckPoint(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.create_checkpoint" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.CreateCheckPoint("abc") + if err != nil { + t.Errorf("CreateCheckPoint should pass: %v", err) + } +} + +func TestCreateCheckPointNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err 
error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.create_checkpoint" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.CreateCheckPoint("abc") + if err == nil { + t.Errorf("CreateCheckPoint should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} + +func TestDeleteCheckPoint(t *testing.T) { + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.delete_checkpoint" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(0) + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.DeleteCheckPoint("abc") + if err != nil { + t.Errorf("DeleteCheckPoint should pass: %v", err) + } +} + +func TestDeleteCheckPointNegative(t *testing.T) { + err_msg := "This is the mock error message" + mock1 := gomonkey.ApplyFunc(dbus.SystemBus, func() (conn *dbus.Conn, err error) { + return &dbus.Conn{}, nil + }) + defer mock1.Reset() + mock2 := gomonkey.ApplyMethod(reflect.TypeOf(&dbus.Object{}), "Go", func(obj *dbus.Object, method string, flags dbus.Flags, ch chan *dbus.Call, args ...interface{}) *dbus.Call { + if method != "org.SONiC.HostService.gcu.delete_checkpoint" { + t.Errorf("Wrong method: %v", method) + } + ret := &dbus.Call{} + ret.Err = nil + ret.Body = make([]interface{}, 2) + ret.Body[0] = int32(1) + ret.Body[1] = err_msg + ch <- ret + return &dbus.Call{} + }) + defer mock2.Reset() + + client, err := NewDbusClient() + if err != nil { + t.Errorf("NewDbusClient failed: %v", err) + } + err = client.DeleteCheckPoint("abc") + if err == nil { + t.Errorf("DeleteCheckPoint should fail") + } + if err.Error() != err_msg { + t.Errorf("Wrong error: %v", err) + } +} diff --git a/swsscommon/dbconnector.go b/swsscommon/dbconnector.go new file mode 100644 index 00000000..7b0d6394 --- /dev/null +++ b/swsscommon/dbconnector.go @@ -0,0 +1,45 @@ +package swsscommon + +// #cgo LDFLAGS: -lcswsscommon -lswsscommon -lstdc++ +// #include +// #include +import "C" + +import ( + "unsafe" +) + +type DBConnector struct { + ptr unsafe.Pointer +} + +func NewDBConnector(db int, hostname string, port int, timeout uint) DBConnector { + hostnameC := C.CString(hostname) + defer C.free(unsafe.Pointer(hostnameC)) + dbc := C.db_connector_new(C.int(db), hostnameC, C.int(port), C.uint(timeout)) + return DBConnector{ptr: unsafe.Pointer(dbc)} +} + +func NewDBConnector2(db int, unixPath string, timeout uint) DBConnector { + unixPathC := C.CString(unixPath) + defer C.free(unsafe.Pointer(unixPathC)) + dbc := C.db_connector_new2(C.int(db), unixPathC, C.uint(timeout)) + return DBConnector{ptr: 
unsafe.Pointer(dbc)} +} + +func (db DBConnector) Delete() { + C.db_connector_delete(C.db_connector_t(db.ptr)) +} + +func (db DBConnector) GetDB() int { + return int(C.db_connector_get_db(C.db_connector_t(db.ptr))) +} + +func DBConnectorSelect(db DBConnector) { + C.db_connector_select(C.db_connector_t(db.ptr)) +} + +func (db DBConnector) NewConnector(timeout uint) DBConnector { + dbc := C.db_connector_new_connector(C.db_connector_t(db.ptr), C.uint(timeout)) + return DBConnector{ptr: unsafe.Pointer(dbc)}; +} diff --git a/swsscommon/producerstatetable.go b/swsscommon/producerstatetable.go new file mode 100644 index 00000000..a50e9407 --- /dev/null +++ b/swsscommon/producerstatetable.go @@ -0,0 +1,96 @@ +package swsscommon + +// #cgo LDFLAGS: -lcswsscommon -lswsscommon -lstdc++ +// #include +// #include +import "C" + +import ( + "unsafe" +) + +type ProducerStateTable struct { + ptr unsafe.Pointer + table string +} + + +func NewProducerStateTable(db DBConnector, tableName string) ProducerStateTable { + tableNameC := C.CString(tableName) + defer C.free(unsafe.Pointer(tableNameC)) + + pt := C.producer_state_table_new(C.db_connector_t2(db.ptr), tableNameC) + return ProducerStateTable{ptr: unsafe.Pointer(pt), table: tableName} +} + +func (pt ProducerStateTable) Delete() { + C.producer_state_table_delete(C.producer_state_table_t(pt.ptr)) +} + +func (pt ProducerStateTable) SetBuffered(buffered bool) { + C.producer_state_table_set_buffered(C.producer_state_table_t(pt.ptr), C._Bool(buffered)) +} + +func (pt ProducerStateTable) Set(key string, values map[string]string, op string, prefix string) { + /* + log.Printf( + "trace: swss: %s %s:%s %s", + op, + pt.table, + key, + values, + ) + */ + + keyC := C.CString(key) + defer C.free(unsafe.Pointer(keyC)) + opC := C.CString(op) + defer C.free(unsafe.Pointer(opC)) + prefixC := C.CString(prefix) + defer C.free(unsafe.Pointer(prefixC)) + + count := len(values) + tuplePtr := (*C.field_value_tuple_t)(C.malloc(C.size_t(C.sizeof_field_value_tuple_t * count))) + defer C.free(unsafe.Pointer(tuplePtr)) + // Get a Go slice to the C array - this doesn't allocate anything + tuples := (*[(1 << 28) - 1]C.field_value_tuple_t)(unsafe.Pointer(tuplePtr))[:count:count] + + idx := 0 + for k, v := range values { + kC := C.CString(k) + defer C.free(unsafe.Pointer(kC)) + vC := C.CString(v) + defer C.free(unsafe.Pointer(vC)) + tuples[idx] = C.field_value_tuple_t{ + field: (*C.char)(kC), + value: (*C.char)(vC), + } + idx = idx + 1 + } + + C.producer_state_table_set(C.producer_state_table_t(pt.ptr), keyC, tuplePtr, C.size_t(count), opC, prefixC) +} + +func (pt ProducerStateTable) Del(key string, op string, prefix string) { + /* + log.Printf( + "trace: swss: %s %s:%s", + op, + pt.table, + key, + ) + */ + + keyC := C.CString(key) + defer C.free(unsafe.Pointer(keyC)) + opC := C.CString(op) + defer C.free(unsafe.Pointer(opC)) + prefixC := C.CString(prefix) + defer C.free(unsafe.Pointer(prefixC)) + + C.producer_state_table_del(C.producer_state_table_t(pt.ptr), keyC, opC, prefixC) +} + +func (pt ProducerStateTable) Flush() { + C.producer_state_table_flush(C.producer_state_table_t(pt.ptr)) +} diff --git a/swsscommon/table.go b/swsscommon/table.go new file mode 100644 index 00000000..a2bf3f6f --- /dev/null +++ b/swsscommon/table.go @@ -0,0 +1,93 @@ +package swsscommon + +// #cgo LDFLAGS: -lcswsscommon -lswsscommon -lstdc++ +// #include +// #include +import "C" + +import ( + "log" + "unsafe" +) + +type Table struct { + ptr unsafe.Pointer + table string +} + + +func NewTable(db DBConnector, 
tableName string) Table { + tableNameC := C.CString(tableName) + defer C.free(unsafe.Pointer(tableNameC)) + + pt := C.table_new(C.db_connector_t2(db.ptr), tableNameC) + return Table{ptr: unsafe.Pointer(pt), table: tableName} +} + +func (pt Table) Delete() { + C.table_delete(C.table_t(pt.ptr)) +} + +func (pt Table) SetBuffered(buffered bool) { + C.table_set_buffered(C.table_t(pt.ptr), C._Bool(buffered)) +} + +func (pt Table) Set(key string, values map[string]string, op string, prefix string) { + log.Printf( + "trace: swss: %s %s:%s %s", + op, + pt.table, + key, + values, + ) + + keyC := C.CString(key) + defer C.free(unsafe.Pointer(keyC)) + opC := C.CString(op) + defer C.free(unsafe.Pointer(opC)) + prefixC := C.CString(prefix) + defer C.free(unsafe.Pointer(prefixC)) + + count := len(values) + tuplePtr := (*C.field_value_tuple_t)(C.malloc(C.size_t(C.sizeof_field_value_tuple_t * count))) + defer C.free(unsafe.Pointer(tuplePtr)) + // Get a Go slice to the C array - this doesn't allocate anything + tuples := (*[(1 << 28) - 1]C.field_value_tuple_t)(unsafe.Pointer(tuplePtr))[:count:count] + + idx := 0 + for k, v := range values { + kC := C.CString(k) + defer C.free(unsafe.Pointer(kC)) + vC := C.CString(v) + defer C.free(unsafe.Pointer(vC)) + tuples[idx] = C.field_value_tuple_t{ + field: (*C.char)(kC), + value: (*C.char)(vC), + } + idx = idx + 1 + } + + C.table_set(C.table_t(pt.ptr), keyC, tuplePtr, C.size_t(count), opC, prefixC) +} + +func (pt Table) Del(key string, op string, prefix string) { + log.Printf( + "trace: swss: %s %s:%s", + op, + pt.table, + key, + ) + + keyC := C.CString(key) + defer C.free(unsafe.Pointer(keyC)) + opC := C.CString(op) + defer C.free(unsafe.Pointer(opC)) + prefixC := C.CString(prefix) + defer C.free(unsafe.Pointer(prefixC)) + + C.table_del(C.table_t(pt.ptr), keyC, opC, prefixC) +} + +func (pt Table) Flush() { + C.table_flush(C.table_t(pt.ptr)) +} diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index cbe8cf0d..a3f5e1f0 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -29,6 +29,7 @@ var ( jwtRefInt = flag.Uint64("jwt_refresh_int", 900, "Seconds before JWT expiry the token can be refreshed.") jwtValInt = flag.Uint64("jwt_valid_int", 3600, "Seconds that JWT token is valid for.") gnmi_translib_write = flag.Bool("gnmi_translib_write", gnmi.ENABLE_TRANSLIB_WRITE, "Enable gNMI translib write for management framework") + gnmi_native_write = flag.Bool("gnmi_native_write", gnmi.ENABLE_NATIVE_WRITE, "Enable gNMI native write") ) func main() { @@ -61,6 +62,7 @@ func main() { cfg := &gnmi.Config{} cfg.Port = int64(*port) cfg.EnableTranslibWrite = bool(*gnmi_translib_write) + cfg.EnableNativeWrite = bool(*gnmi_native_write) cfg.LogLevel = 3 var opts []grpc.ServerOption @@ -137,6 +139,7 @@ func main() { cfg.Port = int64(*port) cfg.UserAuth = userAuth cfg.EnableTranslibWrite = bool(*gnmi_translib_write) + cfg.EnableNativeWrite = bool(*gnmi_native_write) gnmi.GenerateJwtSecretKey() } diff --git a/test/test_gnmi_appldb.py b/test/test_gnmi_appldb.py new file mode 100644 index 00000000..96f25337 --- /dev/null +++ b/test/test_gnmi_appldb.py @@ -0,0 +1,420 @@ + +import json +from utils import gnmi_set, gnmi_get, gnmi_get_with_encoding + +import pytest + + +test_data_update_normal = [ + [ + { + 'update_path': '/sonic-db:APPL_DB/DASH_QOS', + 'get_path': '/sonic-db:APPL_DB/_DASH_QOS', + 'value': { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + }, + { + 'update_path': 
'/sonic-db:APPL_DB/DASH_VNET', + 'get_path': '/sonic-db:APPL_DB/_DASH_VNET', + 'value': { + 'Vnet3721': { + 'address_spaces': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + } + } + ], + [ + { + 'update_path': '/sonic-db:APPL_DB/DASH_QOS/qos_01', + 'get_path': '/sonic-db:APPL_DB/_DASH_QOS/qos_01', + 'value': {'bw': '10001', 'cps': '1001', 'flows': '101'} + }, + { + 'update_path': '/sonic-db:APPL_DB/DASH_QOS/qos_02', + 'get_path': '/sonic-db:APPL_DB/_DASH_QOS/qos_02', + 'value': {'bw': '10002', 'cps': '1002', 'flows': '102'} + }, + { + 'update_path': '/sonic-db:APPL_DB/DASH_VNET/Vnet3721', + 'get_path': '/sonic-db:APPL_DB/_DASH_VNET/Vnet3721', + 'value': { + 'address_spaces': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + } + ] +] + +def clear_appl_db(table_name): + prefix = '/sonic-db:APPL_DB' + get_path = prefix + '/_' + table_name + ret, msg_list = gnmi_get([get_path]) + if ret != 0: + return + for msg in msg_list: + rx_data = json.loads(msg) + delete_list = [] + for key in rx_data.keys(): + delete_path = prefix + '/' + table_name + '/' + key + delete_list.append(delete_path) + if len(delete_list): + ret, msg = gnmi_set(delete_list, [], []) + assert ret == 0, msg + +class TestGNMIApplDb: + + @pytest.mark.parametrize('test_data', test_data_update_normal) + def test_gnmi_update_normal_01(self, test_data): + clear_appl_db('DASH_QOS') + clear_appl_db('DASH_VNET') + update_list = [] + get_list = [] + for i, data in enumerate(test_data): + path = data['update_path'] + get_path = data['get_path'] + value = json.dumps(data['value']) + file_name = 'update' + str(i) + file_object = open(file_name, 'w') + file_object.write(value) + file_object.close() + update_list.append(path + ':@./' + file_name) + get_list.append(get_path) + + ret, msg = gnmi_set([], update_list, []) + assert ret == 0, msg + ret, msg_list = gnmi_get(get_list) + assert ret == 0, 'Invalid return code' + assert len(msg_list), 'Invalid msg: ' + str(msg_list) + for i, data in enumerate(test_data): + hit = False + for msg in msg_list: + rx_data = json.loads(msg) + if data['value'] == rx_data: + hit = True + break + assert hit == True, 'No match for %s'%str(data['value']) + + @pytest.mark.parametrize('test_data', test_data_update_normal) + def test_gnmi_delete_normal_01(self, test_data): + delete_list = [] + update_list = [] + get_list = [] + for i, data in enumerate(test_data): + path = data['update_path'] + path_length = path.count('/') + # path length is 2, path has table name, and has no key + # there's no consumer for unit test, and gnmi cannot delete temporary state table + if path_length <= 2: + continue + get_path = data['get_path'] + value = json.dumps(data['value']) + file_name = 'update' + str(i) + file_object = open(file_name, 'w') + file_object.write(value) + file_object.close() + update_list.append(path + ':@./' + file_name) + delete_list.append(path) + get_list.append(get_path) + + if len(update_list) == 0: + return + ret, msg = gnmi_set([], update_list, []) + assert ret == 0, msg + ret, msg = gnmi_set(delete_list, [], []) + assert ret == 0, msg + for get in get_list: + ret, msg_list = gnmi_get([get]) + if ret != 0: + continue + for msg in msg_list: + assert msg == '{}', 'Delete failed' + + @pytest.mark.parametrize('test_data', test_data_update_normal) + def test_gnmi_replace_normal_01(self, test_data): + clear_appl_db('DASH_QOS') + clear_appl_db('DASH_VNET') + replace_list = [] + get_list = [] + for i, data in enumerate(test_data): + path = data['update_path'] + get_path = data['get_path'] + value = 
json.dumps(data['value']) + file_name = 'update' + str(i) + file_object = open(file_name, 'w') + file_object.write(value) + file_object.close() + replace_list.append(path + ':@./' + file_name) + get_list.append(get_path) + + ret, msg = gnmi_set([], [], replace_list) + assert ret == 0, msg + ret, msg_list = gnmi_get(get_list) + assert ret == 0, 'Invalid return code' + assert len(msg_list), 'Invalid msg: ' + str(msg_list) + for i, data in enumerate(test_data): + hit = False + for msg in msg_list: + rx_data = json.loads(msg) + if data['value'] == rx_data: + hit = True + break + assert hit == True, 'No match for %s'%str(data['value']) + + @pytest.mark.parametrize('test_data', test_data_update_normal) + def test_gnmi_replace_normal_02(self, test_data): + replace_list = [] + update_list = [] + get_list = [] + for i, data in enumerate(test_data): + path = data['update_path'] + path_length = path.count('/') + # path length is 2, path has table name, and has no key + # there's no consumer for unit test, and gnmi cannot delete temporary state table + if path_length <= 2: + continue + get_path = data['get_path'] + value = json.dumps(data['value']) + file_name = 'update' + str(i) + file_object = open(file_name, 'w') + file_object.write(value) + file_object.close() + update_list.append(path + ':@./' + file_name) + replace_list.append(path + ':#') + get_list.append(get_path) + + if len(update_list) == 0: + return + ret, msg = gnmi_set([], update_list, []) + assert ret == 0, msg + ret, msg = gnmi_set([], [], replace_list) + assert ret == 0, msg + for get in get_list: + ret, msg_list = gnmi_get([get]) + if ret != 0: + continue + for msg in msg_list: + assert msg == '{}', 'Delete failed' + + def test_gnmi_invalid_path_01(self): + path = '/sonic-db:APPL_DB/DASH_QOS/qos_01/bw' + value = '300' + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Invalid path' + assert 'Unsupported path' in msg + + def test_gnmi_invalid_origin_01(self): + path = '/sonic-invalid:APPL_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Origin is invalid' + assert 'Invalid origin' in msg + + get_list = [path] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Origin is invalid' + hit = False + exp = 'Invalid origin' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_origin_02(self): + path = '/sonic-yang:APPL_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Origin is invalid' + assert 'not implemented' in msg + + get_list = [path] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Origin is invalid' + hit = False 
+ exp = 'not implemented' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_origin_03(self): + path1 = '/sonic-db:APPL_DB/DASH_QOS' + path2 = '/sonic-yang:APPL_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path1 + ':@./' + file_name, path2 + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Origin is invalid' + assert 'Origin conflict' in msg + + get_list = [path1, path2] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Origin is invalid' + hit = False + exp = 'Origin conflict' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_origin_04(self): + path = '/APPL_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Origin is invalid' + assert 'No origin' in msg + + get_list = [path] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Origin is invalid' + hit = False + exp = 'No origin' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_target_01(self): + path = '/sonic-db:INVALID_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Target is invalid' + assert 'Invalid target' in msg + + get_list = [path] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Target is invalid' + hit = False + exp = 'Invalid target' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_target_02(self): + path = '/sonic-db:ASIC_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Target is invalid' + assert 'Set RPC does not support ASIC_DB' in msg + + def test_gnmi_invalid_target_03(self): + path1 = '/sonic-db:APPL_DB/DASH_QOS' + path2 = '/sonic-db:CONFIG_DB/DASH_QOS' + value = { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + update_list = [] + text = json.dumps(value) + file_name = 'update.txt' + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + update_list = [path1 + ':@./' + file_name, 
path2 + ':@./' + file_name] + + ret, msg = gnmi_set([], update_list, []) + assert ret != 0, 'Target is invalid' + assert 'Target conflict' in msg + + get_list = [path1, path2] + ret, msg_list = gnmi_get(get_list) + assert ret != 0, 'Target is invalid' + hit = False + exp = 'Target conflict' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + + def test_gnmi_invalid_encoding(self): + path = '/sonic-db:APPL_DB/DASH_QOS' + get_list = [path] + ret, msg_list = gnmi_get_with_encoding(get_list, "PROTO") + assert ret != 0, 'Encoding is not supported' + hit = False + exp = 'unsupported encoding' + for msg in msg_list: + if exp in msg: + hit = True + break + assert hit == True, 'No expected error: %s'%exp + diff --git a/test/test_gnmi_capabilities.py b/test/test_gnmi_capabilities.py new file mode 100644 index 00000000..fe0966cd --- /dev/null +++ b/test/test_gnmi_capabilities.py @@ -0,0 +1,11 @@ +import pytest +from utils import gnmi_capabilities + +class TestGNMICapabilities: + + def test_gnmi_cap(self): + ret, msg = gnmi_capabilities() + assert ret == 0, msg + assert "sonic-db" in msg, "No sonic-db in msg: " + msg + assert "sonic-yang" in msg, "No sonic-yang in msg: " + msg + diff --git a/test/test_gnmi_configdb.py b/test/test_gnmi_configdb.py new file mode 100644 index 00000000..b3009daa --- /dev/null +++ b/test/test_gnmi_configdb.py @@ -0,0 +1,350 @@ + +import os +import json +import time +from utils import gnmi_set, gnmi_get, gnmi_dump + +import pytest + + +test_data_update_normal = [ + [ + { + 'path': '/sonic-db:CONFIG_DB/PORT', + 'value': { + 'Ethernet4': {'admin_status': 'down'}, + 'Ethernet8': {'admin_status': 'down'} + } + } + ], + [ + { + 'path': '/sonic-db:CONFIG_DB/PORT/Ethernet4/admin_status', + 'value': 'up' + }, + { + 'path': '/sonic-db:CONFIG_DB/PORT/Ethernet8/admin_status', + 'value': 'up' + } + ], + [ + { + 'path': '/sonic-db:CONFIG_DB/PORT/Ethernet4', + 'value': {'admin_status': 'down'} + }, + { + 'path': '/sonic-db:CONFIG_DB/PORT/Ethernet8', + 'value': {'admin_status': 'down'} + } + ] +] + +test_json_checkpoint = { + "DASH_QOS": { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + }, + "DASH_VNET": { + 'vnet_3721': { + 'address_spaces': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + } +} + +test_data_checkpoint = [ + [ + { + 'path': '/sonic-db:CONFIG_DB/DASH_QOS', + 'value': { + 'qos_01': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + 'qos_02': {'bw': '6000', 'cps': '200', 'flows': '101'} + } + }, + { + 'path': '/sonic-db:CONFIG_DB/DASH_VNET', + 'value': { + 'vnet_3721': { + 'address_spaces': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + } + } + ], + [ + { + 'path': '/sonic-db:CONFIG_DB/DASH_QOS/qos_01', + 'value': {'bw': '54321', 'cps': '1000', 'flows': '300'}, + }, + { + 'path': '/sonic-db:CONFIG_DB/DASH_QOS/qos_02', + 'value': {'bw': '6000', 'cps': '200', 'flows': '101'} + }, + { + 'path': '/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721', + 'value': { + 'address_spaces': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + } + ], + [ + { + 'path': '/sonic-db:CONFIG_DB/DASH_QOS/qos_01/flows', + 'value': '300' + }, + { + 'path': '/sonic-db:CONFIG_DB/DASH_QOS/qos_02/bw', + 'value': '6000' + }, + { + 'path': '/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces', + 'value': ["10.250.0.0", "192.168.3.0", "139.66.72.9"] + } + ], + [ + { + 'path': '/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces/0', + 'value': "10.250.0.0" + }, + { + 'path': 
'/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces/1', + 'value': "192.168.3.0" + } + ] +] + +patch_file = '/tmp/gcu.patch' +config_file = '/tmp/config_db.json.tmp' +checkpoint_file = '/etc/sonic/config.cp.json' + +def create_checkpoint(file_name, text): + file_object = open(file_name, 'w') + file_object.write(text) + file_object.close() + return + +class TestGNMIConfigDb: + + @pytest.mark.parametrize("test_data", test_data_update_normal) + def test_gnmi_incremental_update(self, test_data): + create_checkpoint(checkpoint_file, '{}') + + update_list = [] + for i, data in enumerate(test_data): + path = data['path'] + value = json.dumps(data['value']) + file_name = 'update' + str(i) + file_object = open(file_name, 'w') + file_object.write(value) + file_object.close() + update_list.append(path + ':@./' + file_name) + + ret, old_apply_patch_cnt = gnmi_dump("DBUS apply patch db") + assert ret == 0, 'Fail to read counter' + ret, old_create_checkpoint_cnt = gnmi_dump("DBUS create checkpoint") + assert ret == 0, 'Fail to read counter' + ret, old_delete_checkpoint_cnt = gnmi_dump("DBUS delete checkpoint") + assert ret == 0, 'Fail to read counter' + ret, old_config_save_cnt = gnmi_dump("DBUS config save") + assert ret == 0, 'Fail to read counter' + ret, msg = gnmi_set([], update_list, []) + assert ret == 0, msg + assert os.path.exists(patch_file), "No patch file" + with open(patch_file,'r') as pf: + patch_json = json.load(pf) + for item in test_data: + test_path = item['path'] + test_value = item['value'] + for patch_data in patch_json: + assert patch_data['op'] == 'add', "Invalid operation" + if test_path == '/sonic-db:CONFIG_DB' + patch_data['path'] and test_value == patch_data['value']: + break + else: + pytest.fail('No item in patch: %s'%str(item)) + ret, new_apply_patch_cnt = gnmi_dump("DBUS apply patch db") + assert ret == 0, 'Fail to read counter' + assert new_apply_patch_cnt == old_apply_patch_cnt + 1, 'DBUS API is not invoked' + ret, new_create_checkpoint_cnt = gnmi_dump("DBUS create checkpoint") + assert ret == 0, 'Fail to read counter' + assert new_create_checkpoint_cnt == old_create_checkpoint_cnt + 1, 'DBUS API is not invoked' + ret, new_delete_checkpoint_cnt = gnmi_dump("DBUS delete checkpoint") + assert ret == 0, 'Fail to read counter' + assert new_delete_checkpoint_cnt == old_delete_checkpoint_cnt + 1, 'DBUS API is not invoked' + ret, new_config_save_cnt = gnmi_dump("DBUS config save") + assert ret == 0, 'Fail to read counter' + assert new_config_save_cnt == old_config_save_cnt + 1, 'DBUS API is not invoked' + + @pytest.mark.parametrize("test_data", test_data_checkpoint) + def test_gnmi_incremental_delete(self, test_data): + create_checkpoint(checkpoint_file, json.dumps(test_json_checkpoint)) + + if os.path.exists(patch_file): + os.remove(patch_file) + delete_list = [] + for i, data in enumerate(test_data): + path = data['path'] + delete_list.append(path) + ret, old_cnt = gnmi_dump("DBUS apply patch db") + assert ret == 0, 'Fail to read counter' + ret, msg = gnmi_set(delete_list, [], []) + assert ret == 0, msg + assert os.path.exists(patch_file), "No patch file" + with open(patch_file,'r') as pf: + patch_json = json.load(pf) + for item in test_data: + test_path = item['path'] + for patch_data in patch_json: + assert patch_data['op'] == 'remove', "Invalid operation" + if test_path == '/sonic-db:CONFIG_DB' + patch_data['path']: + break + else: + pytest.fail('No item in patch: %s'%str(item)) + ret, new_cnt = gnmi_dump("DBUS apply patch db") + assert ret == 0, 'Fail to read counter' 
+        assert new_cnt == old_cnt+1, 'DBUS API is not invoked'
+
+    @pytest.mark.parametrize("test_data", test_data_update_normal)
+    def test_gnmi_incremental_delete_negative(self, test_data):
+        create_checkpoint(checkpoint_file, '{}')
+        if os.path.exists(patch_file):
+            os.remove(patch_file)
+
+        delete_list = []
+        for i, data in enumerate(test_data):
+            path = data['path']
+            delete_list.append(path)
+
+        ret, old_cnt = gnmi_dump("DBUS apply patch db")
+        assert ret == 0, 'Fail to read counter'
+        ret, msg = gnmi_set(delete_list, [], [])
+        assert ret == 0, msg
+        assert not os.path.exists(patch_file), "Should not generate patch file"
+        ret, new_cnt = gnmi_dump("DBUS apply patch db")
+        assert ret == 0, 'Fail to read counter'
+        assert new_cnt == old_cnt, 'DBUS API should not be invoked'
+
+    @pytest.mark.parametrize("test_data", test_data_update_normal)
+    def test_gnmi_incremental_replace(self, test_data):
+        create_checkpoint(checkpoint_file, '{}')
+
+        replace_list = []
+        for i, data in enumerate(test_data):
+            path = data['path']
+            value = json.dumps(data['value'])
+            file_name = 'update' + str(i)
+            file_object = open(file_name, 'w')
+            file_object.write(value)
+            file_object.close()
+            replace_list.append(path + ':@./' + file_name)
+
+        ret, old_cnt = gnmi_dump("DBUS apply patch db")
+        assert ret == 0, 'Fail to read counter'
+        ret, msg = gnmi_set([], [], replace_list)
+        assert ret == 0, msg
+        assert os.path.exists(patch_file), "No patch file"
+        with open(patch_file,'r') as pf:
+            patch_json = json.load(pf)
+        for item in test_data:
+            test_path = item['path']
+            test_value = item['value']
+            for patch_data in patch_json:
+                assert patch_data['op'] == 'add', "Invalid operation"
+                if test_path == '/sonic-db:CONFIG_DB' + patch_data['path'] and test_value == patch_data['value']:
+                    break
+            else:
+                pytest.fail('No item in patch: %s'%str(item))
+        ret, new_cnt = gnmi_dump("DBUS apply patch db")
+        assert ret == 0, 'Fail to read counter'
+        assert new_cnt == old_cnt+1, 'DBUS API is not invoked'
+
+    def test_gnmi_full(self):
+        test_data = {
+            'field_01': '20001',
+            'field_02': '20002',
+            'field_03': '20003',
+            'field_04': {'item_01': 'aaaa', 'item_02': 'xxxxx'}
+        }
+        file_name = 'config_db.test'
+        file_object = open(file_name, 'w')
+        value = json.dumps(test_data)
+        file_object.write(value)
+        file_object.close()
+        delete_list = ['/sonic-db:CONFIG_DB/']
+        update_list = ['/sonic-db:CONFIG_DB/' + ':@./' + file_name]
+
+        ret, msg = gnmi_set(delete_list, update_list, [])
+        assert ret == 0, msg
+        assert os.path.exists(config_file), "No config file"
+        with open(config_file,'r') as cf:
+            config_json = json.load(cf)
+        assert test_data == config_json, "Wrong config file"
+
+    def test_gnmi_full_negative(self):
+        delete_list = ['/sonic-db:CONFIG_DB/']
+        update_list = ['/sonic-db:CONFIG_DB/' + ':abc']
+
+        ret, msg = gnmi_set(delete_list, update_list, [])
+        assert ret != 0, 'Invalid ietf_json_val'
+        assert 'IETF JSON' in msg
+
+    @pytest.mark.parametrize("test_data", test_data_checkpoint)
+    def test_gnmi_get_checkpoint(self, test_data):
+        if os.path.isfile(checkpoint_file):
+            os.remove(checkpoint_file)
+
+        get_list = []
+        for data in test_data:
+            path = data['path']
+            get_list.append(path)
+
+        ret, msg_list = gnmi_get(get_list)
+        if ret == 0:
+            for msg in msg_list:
+                assert msg == '{}', 'Invalid result'
+
+        text = json.dumps(test_json_checkpoint)
+        create_checkpoint(checkpoint_file, text)
+
+        get_list = []
+        for data in test_data:
+            path = data['path']
+            value = json.dumps(data['value'])
+            get_list.append(path)
+
+        ret, msg_list =
gnmi_get(get_list) + assert ret == 0, 'Invalid return code' + assert len(msg_list), 'Invalid msg: ' + str(msg_list) + for data in test_data: + hit = False + for msg in msg_list: + rx_data = json.loads(msg) + if data['value'] == rx_data: + hit = True + break + assert hit == True, 'No match for %s'%str(data['value']) + + def test_gnmi_get_checkpoint_negative_01(self): + text = json.dumps(test_json_checkpoint) + create_checkpoint(checkpoint_file, text) + + get_list = ['/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces/0/abc'] + + ret, _ = gnmi_get(get_list) + assert ret != 0, 'Invalid path' + + def test_gnmi_get_checkpoint_negative_02(self): + text = json.dumps(test_json_checkpoint) + create_checkpoint(checkpoint_file, text) + + get_list = ['/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces/abc'] + + ret, _ = gnmi_get(get_list) + assert ret != 0, 'Invalid path' + + def test_gnmi_get_checkpoint_negative_03(self): + text = json.dumps(test_json_checkpoint) + create_checkpoint(checkpoint_file, text) + + get_list = ['/sonic-db:CONFIG_DB/DASH_VNET/vnet_3721/address_spaces/1000'] + + ret, _ = gnmi_get(get_list) + assert ret != 0, 'Invalid path' + diff --git a/test/test_gnoi.py b/test/test_gnoi.py new file mode 100644 index 00000000..15c8bf6e --- /dev/null +++ b/test/test_gnoi.py @@ -0,0 +1,33 @@ +import pytest +from utils import gnoi_time, gnoi_setpackage, gnoi_switchcontrolprocessor +from utils import gnoi_reboot, gnoi_rebootstatus, gnoi_cancelreboot +from utils import gnoi_ping, gnoi_traceroute, gnmi_dump + +class TestGNOI: + + def test_gnoi_time(self): + ret, msg = gnoi_time() + assert ret == 0, msg + assert 'time' in msg, 'Invalid response: %s'%msg + + def test_gnoi_reboot(self): + ret, old_cnt = gnmi_dump('DBUS config reload') + assert ret == 0, 'Fail to read counter' + + ret, msg = gnoi_reboot(1, 0, 'Test reboot') + assert ret == 0, msg + + ret, new_cnt = gnmi_dump('DBUS config reload') + assert ret == 0, 'Fail to read counter' + assert new_cnt == old_cnt+1, 'DBUS API is not invoked' + + def test_gnoi_rebootstatus(self): + ret, msg = gnoi_rebootstatus() + assert ret != 0, 'RebootStatus should fail' + msg + assert 'Unimplemented' in msg + + def test_gnoi_cancelreboot(self): + ret, msg = gnoi_cancelreboot('Test reboot') + assert ret != 0, 'CancelReboot should fail' + msg + assert 'Unimplemented' in msg + diff --git a/test/utils.py b/test/utils.py new file mode 100644 index 00000000..26700e83 --- /dev/null +++ b/test/utils.py @@ -0,0 +1,253 @@ +import os +import re +import subprocess + +def run_cmd(cmd): + res = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + res.wait() + if res.returncode: + msg = str(res.stderr.read(), encoding='utf-8') + else: + msg = str(res.stdout.read(), encoding='utf-8') + return res.returncode, msg + +def gnmi_set(delete_list, update_list, replace_list): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_set ' + cmd += '-insecure -username admin -password sonicadmin ' + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for delete in delete_list: + cmd += " -delete " + delete + for update in update_list: + cmd += " -update " + update + for replace in replace_list: + cmd += " -replace " + replace + ret, msg = run_cmd(cmd) + if ret == 0: + return ret, '' + return ret, msg + +def gnmi_set_with_password(delete_list, update_list, replace_list, user, password): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_set ' + cmd += '-insecure -username %s -password %s '%(user, 
password) + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for delete in delete_list: + cmd += " -delete " + delete + for update in update_list: + cmd += " -update " + update + for replace in replace_list: + cmd += " -replace " + replace + ret, msg = run_cmd(cmd) + if ret == 0: + return ret, '' + return ret, msg + +def gnmi_set_with_jwt(delete_list, update_list, replace_list, token): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_set ' + cmd += '-insecure -jwt_token ' + token + ' ' + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for delete in delete_list: + cmd += " -delete " + delete + for update in update_list: + cmd += " -update " + update + for replace in replace_list: + cmd += " -replace " + replace + ret, msg = run_cmd(cmd) + if ret == 0: + return ret, '' + return ret, msg + +def gnmi_get(path_list): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_get ' + cmd += '-insecure -username admin -password sonicadmin ' + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for path in path_list: + cmd += " -xpath " + path + ret, msg = run_cmd(cmd) + if ret == 0: + msg = msg.replace('\\', '') + find_list = re.findall( r'json_ietf_val:\s*"(.*?)"\s*>', msg) + if find_list: + return ret, find_list + else: + return -1, [msg] + return ret, [msg] + +def gnmi_get_with_encoding(path_list, encoding): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_get ' + cmd += '-insecure -username admin -password sonicadmin ' + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + cmd += '-encoding %s '%(encoding) + for path in path_list: + cmd += " -xpath " + path + ret, msg = run_cmd(cmd) + if ret == 0: + msg = msg.replace('\\', '') + find_list = re.findall( r'json_ietf_val:\s*"(.*?)"\s*>', msg) + if find_list: + return ret, find_list + else: + return -1, [msg] + return ret, [msg] + +def gnmi_get_with_password(path_list, user, password): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_get ' + cmd += '-insecure -username %s -password %s '%(user, password) + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for path in path_list: + cmd += " -xpath " + path + ret, msg = run_cmd(cmd) + if ret == 0: + msg = msg.replace('\\', '') + find_list = re.findall( r'json_ietf_val:\s*"(.*?)"\s*>', msg) + if find_list: + return ret, find_list + else: + return -1, [msg] + return ret, [msg] + +def gnmi_get_with_jwt(path_list, token): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_get ' + cmd += '-insecure -jwt_token ' + token + ' ' + cmd += '-target_addr 127.0.0.1:8080 ' + cmd += '-alsologtostderr ' + cmd += '-xpath_target MIXED ' + for path in path_list: + cmd += " -xpath " + path + ret, msg = run_cmd(cmd) + if ret == 0: + msg = msg.replace('\\', '') + find_list = re.findall( r'json_ietf_val:\s*"(.*?)"\s*>', msg) + if find_list: + return ret, find_list + else: + return -1, [msg] + return ret, [msg] + +def gnmi_capabilities(): + path = os.getcwd() + cmd = path + '/build/bin/gnmi_cli ' + cmd += '-client_types=gnmi -a 127.0.0.1:8080 -logtostderr -insecure ' + cmd += '-capabilities ' + ret, msg = run_cmd(cmd) + return ret, msg + +def gnmi_dump(name): + path = os.getcwd() + cmd = 'sudo ' + path + '/build/bin/gnmi_dump' + ret, msg = run_cmd(cmd) + if ret == 0: + msg_list = msg.split('\n') + for line in msg_list: + if '---' in line: + current = line.split('---') 
+ if current[0] == name: + return 0, int(current[1]) + return -1, 0 + return ret, 0 + +def gnoi_time(): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc Time ' + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_reboot(method, delay, message): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc Reboot ' + cmd += '-jsonin "{\\\"method\\\":%d, \\\"delay\\\":%d, \\\"message\\\":\\\"%s\\\"}"'%(method, delay, message) + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_rebootstatus(): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc RebootStatus ' + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_cancelreboot(message): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc CancelReboot ' + cmd += '-jsonin "{\\\"message\\\":\\\"%s\\\"}"'%(message) + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_ping(dst): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc Ping ' + cmd += '-jsonin "{\\\"destination\\\":\\\"%s\\\"}"'%(dst) + ret, msg = run_cmd(cmd) + return ret, msg + + +def gnoi_traceroute(dst): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc Traceroute ' + cmd += '-jsonin "{\\\"destination\\\":\\\"%s\\\"}"'%(dst) + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_setpackage(): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc SetPackage ' + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_switchcontrolprocessor(): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-rpc SwitchControlProcessor ' + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_authenticate(username, password): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-module Sonic -rpc authenticate ' + cmd += '-jsonin "{\\\"Username\\\":\\\"%s\\\", \\\"Password\\\":\\\"%s\\\"}"'%(username, password) + ret, msg = run_cmd(cmd) + return ret, msg + +def gnoi_refresh_with_jwt(token): + path = os.getcwd() + cmd = path + '/build/bin/gnoi_client ' + cmd += '-insecure -target 127.0.0.1:8080 ' + cmd += '-jwt_token ' + token + ' ' + cmd += '-module Sonic -rpc refresh ' + ret, msg = run_cmd(cmd) + return ret, msg + diff --git a/testdata/batch.txt b/testdata/batch.txt new file mode 100644 index 00000000..56ef4c30 --- /dev/null +++ b/testdata/batch.txt @@ -0,0 +1 @@ +{"qos00000001": {"bw":"1000"}, "qos00000002": {"bw":"1000"}, "qos00000003": {"bw":"1000"}, "qos00000004": {"bw":"1000"}, "qos00000005": {"bw":"1000"}, "qos00000006": {"bw":"1000"}, "qos00000007": {"bw":"1000"}, "qos00000008": {"bw":"1000"}, "qos00000009": {"bw":"1000"}, "qos00000010": {"bw":"1000"}, "qos00000011": {"bw":"1000"}, "qos00000012": {"bw":"1000"}, "qos00000013": {"bw":"1000"}, "qos00000014": {"bw":"1000"}, "qos00000015": {"bw":"1000"}, "qos00000016": {"bw":"1000"}, "qos00000017": {"bw":"1000"}, "qos00000018": {"bw":"1000"}, "qos00000019": {"bw":"1000"}, "qos00000020": {"bw":"1000"}, "qos00000021": {"bw":"1000"}, "qos00000022": {"bw":"1000"}, "qos00000023": {"bw":"1000"}, "qos00000024": {"bw":"1000"}, "qos00000025": 
{"bw":"1000"}, "qos00000026": {"bw":"1000"}, "qos00000027": {"bw":"1000"}, "qos00000028": {"bw":"1000"}, "qos00000029": {"bw":"1000"}, "qos00000030": {"bw":"1000"}, "qos00000031": {"bw":"1000"}, "qos00000032": {"bw":"1000"}, "qos00000033": {"bw":"1000"}, "qos00000034": {"bw":"1000"}, "qos00000035": {"bw":"1000"}, "qos00000036": {"bw":"1000"}, "qos00000037": {"bw":"1000"}, "qos00000038": {"bw":"1000"}, "qos00000039": {"bw":"1000"}, "qos00000040": {"bw":"1000"}, "qos00000041": {"bw":"1000"}, "qos00000042": {"bw":"1000"}, "qos00000043": {"bw":"1000"}, "qos00000044": {"bw":"1000"}, "qos00000045": {"bw":"1000"}, "qos00000046": {"bw":"1000"}, "qos00000047": {"bw":"1000"}, "qos00000048": {"bw":"1000"}, "qos00000049": {"bw":"1000"}, "qos00000050": {"bw":"1000"}, "qos00000051": {"bw":"1000"}, "qos00000052": {"bw":"1000"}, "qos00000053": {"bw":"1000"}, "qos00000054": {"bw":"1000"}, "qos00000055": {"bw":"1000"}, "qos00000056": {"bw":"1000"}, "qos00000057": {"bw":"1000"}, "qos00000058": {"bw":"1000"}, "qos00000059": {"bw":"1000"}, "qos00000060": {"bw":"1000"}, "qos00000061": {"bw":"1000"}, "qos00000062": {"bw":"1000"}, "qos00000063": {"bw":"1000"}, "qos00000064": {"bw":"1000"}, "qos00000065": {"bw":"1000"}, "qos00000066": {"bw":"1000"}, "qos00000067": {"bw":"1000"}, "qos00000068": {"bw":"1000"}, "qos00000069": {"bw":"1000"}, "qos00000070": {"bw":"1000"}, "qos00000071": {"bw":"1000"}, "qos00000072": {"bw":"1000"}, "qos00000073": {"bw":"1000"}, "qos00000074": {"bw":"1000"}, "qos00000075": {"bw":"1000"}, "qos00000076": {"bw":"1000"}, "qos00000077": {"bw":"1000"}, "qos00000078": {"bw":"1000"}, "qos00000079": {"bw":"1000"}, "qos00000080": {"bw":"1000"}, "qos00000081": {"bw":"1000"}, "qos00000082": {"bw":"1000"}, "qos00000083": {"bw":"1000"}, "qos00000084": {"bw":"1000"}, "qos00000085": {"bw":"1000"}, "qos00000086": {"bw":"1000"}, "qos00000087": {"bw":"1000"}, "qos00000088": {"bw":"1000"}, "qos00000089": {"bw":"1000"}, "qos00000090": {"bw":"1000"}, "qos00000091": {"bw":"1000"}, "qos00000092": {"bw":"1000"}, "qos00000093": {"bw":"1000"}, "qos00000094": {"bw":"1000"}, "qos00000095": {"bw":"1000"}, "qos00000096": {"bw":"1000"}, "qos00000097": {"bw":"1000"}, "qos00000098": {"bw":"1000"}, "qos00000099": {"bw":"1000"}, "qos00000100": {"bw":"1000"}} \ No newline at end of file