From b13f7a52759578b72ba4ea724ba71d185da5423b Mon Sep 17 00:00:00 2001
From: Utkarsh Dubey
Date: Thu, 18 Jan 2024 15:01:45 +0530
Subject: [PATCH 1/4] Add NVMe/TCP targets in publish and NodeGetInfo

---
 go.mod                                |  1 +
 pkg/common/common.go                  | 32 ++++++++++++++++++++++++++++---
 pkg/controller/publisher.go           | 15 +++++++++++---
 pkg/node/node.go                      | 10 ++--------
 pkg/node/node_connectivity_checker.go |  3 ++-
 5 files changed, 47 insertions(+), 14 deletions(-)

diff --git a/go.mod b/go.mod
index 114892b9..b0f86693 100644
--- a/go.mod
+++ b/go.mod
@@ -2,6 +2,7 @@ module github.com/dell/csi-powerstore/v2
 
 go 1.21
 
+replace github.com/dell/gopowerstore => ../gopowerstore
 require (
 	github.com/akutz/gosync v0.1.0
 	github.com/apparentlymart/go-cidr v1.1.0
diff --git a/pkg/common/common.go b/pkg/common/common.go
index c58c133c..17787d99 100644
--- a/pkg/common/common.go
+++ b/pkg/common/common.go
@@ -298,7 +298,7 @@ func GetLogFields(ctx context.Context) log.Fields {
 	return fields
 }
 
-// GetISCSITargetsInfoFromStorage returns list of gobrick compatible iscsi tragets by querying PowerStore array
+// GetISCSITargetsInfoFromStorage returns list of gobrick compatible iscsi targets by querying PowerStore array
 func GetISCSITargetsInfoFromStorage(client gopowerstore.Client, volumeApplianceID string) ([]gobrick.ISCSITargetInfo, error) {
 	addrInfo, err := client.GetStorageISCSITargetAddresses(context.Background())
 	if err != nil {
@@ -319,7 +319,35 @@ func GetISCSITargetsInfoFromStorage(client gopowerstore.Client, volumeApplianceI
 	return result, nil
 }
 
-// GetFCTargetsInfoFromStorage returns list of gobrick compatible FC tragets by querying PowerStore array
+// GetNVMETCPTargetsInfoFromStorage returns list of gobrick compatible NVMe/TCP targets by querying PowerStore array
+func GetNVMETCPTargetsInfoFromStorage(client gopowerstore.Client, volumeApplianceID string) ([]gobrick.NVMeTargetInfo, error) {
+	clusterInfo, err := client.GetCluster(context.Background())
+	if err != nil {
+		log.Error(err.Error())
+		return []gobrick.NVMeTargetInfo{}, err
+	}
+	nvmeNQN := clusterInfo.NVMeNQN
+
+	addrInfo, err := client.GetStorageNVMETCPTargetAddresses(context.Background())
+	if err != nil {
+		log.Error(err.Error())
+		return []gobrick.NVMeTargetInfo{}, err
+	}
+	// sort data by id
+	sort.Slice(addrInfo, func(i, j int) bool {
+		return addrInfo[i].ID < addrInfo[j].ID
+	})
+	var result []gobrick.NVMeTargetInfo
+	for _, t := range addrInfo {
+		// volumeApplianceID will be empty in case the call is from NodeGetInfo
+		if t.ApplianceID == volumeApplianceID || volumeApplianceID == "" {
+			result = append(result, gobrick.NVMeTargetInfo{Target: nvmeNQN, Portal: fmt.Sprintf("%s:4420", t.Address)})
+		}
+	}
+	return result, nil
+}
+
+// GetFCTargetsInfoFromStorage returns list of gobrick compatible FC targets by querying PowerStore array
 func GetFCTargetsInfoFromStorage(client gopowerstore.Client, volumeApplianceID string) ([]gobrick.FCTargetInfo, error) {
 	fcPorts, err := client.GetFCPorts(context.Background())
 	if err != nil {
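The new helper mirrors its iSCSI and FC siblings: each address the array advertises becomes one gobrick.NVMeTargetInfo, every entry carries the cluster-wide NQN, and the standard NVMe/TCP port 4420 is appended to the portal. The controller-side publisher (next diff) then fans those targets out into indexed publish-context keys for the node plugin to read back. A minimal sketch of that convention follows; the prefix strings are illustrative stand-ins, not the driver's actual common.PublishContextNVMETCP* constants:

package main

import (
	"fmt"

	"github.com/dell/gobrick"
)

// Hypothetical key prefixes, standing in for the driver's
// common.PublishContextNVMETCPPortalsPrefix / ...TargetsPrefix constants.
const (
	portalPrefix = "NVMETCP_PORTAL"
	targetPrefix = "NVMETCP_TARGET"
)

// fanOutNVMeTCPTargets flattens discovered targets into the indexed
// key/value shape that ControllerPublishVolume hands to the node plugin.
func fanOutNVMeTCPTargets(targets []gobrick.NVMeTargetInfo) map[string]string {
	pc := make(map[string]string, 2*len(targets))
	for i, t := range targets {
		pc[fmt.Sprintf("%s%d", portalPrefix, i)] = t.Portal // e.g. "192.168.1.10:4420"
		pc[fmt.Sprintf("%s%d", targetPrefix, i)] = t.Target // cluster-wide NQN
	}
	return pc
}

func main() {
	demo := []gobrick.NVMeTargetInfo{
		{Target: "nqn.1988-11.com.dell:powerstore:00:example", Portal: "192.168.1.10:4420"},
		{Target: "nqn.1988-11.com.dell:powerstore:00:example", Portal: "192.168.1.11:4420"},
	}
	fmt.Println(fanOutNVMeTCPTargets(demo))
}

Both sides only need to agree on the prefixes and the zero-based index, which is why GetNVMETCPTargetsInfoFromStorage sorts the addresses by ID first: a stable ordering keeps the indices deterministic across calls.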
diff --git a/pkg/controller/publisher.go b/pkg/controller/publisher.go
index 7b3ef751..e745c0f8 100644
--- a/pkg/controller/publisher.go
+++ b/pkg/controller/publisher.go
@@ -184,7 +184,6 @@ func (s *SCSIPublisher) addTargetsInfoToPublishContext(
 		publishContext[fmt.Sprintf("%s%d", common.PublishContextFCWWPNPrefix, i)] = t.WWPN
 	}
 
-	// There is no API availble for NVMeTCP and hence targets are added in node staging using goNVMe
 	nvmefcTargetInfo, err := common.GetNVMEFCTargetInfoFromStorage(client, volumeApplianceID)
 	if err != nil {
 		log.Error("error unable to get NVMeFC targets from array", err)
@@ -193,6 +192,16 @@ func (s *SCSIPublisher) addTargetsInfoToPublishContext(
 		publishContext[fmt.Sprintf("%s%d", common.PublishContextNVMEFCPortalsPrefix, i)] = t.Portal
 		publishContext[fmt.Sprintf("%s%d", common.PublishContextNVMEFCTargetsPrefix, i)] = t.Target
 	}
+
+	nvmetcpTargetInfo, err := common.GetNVMETCPTargetsInfoFromStorage(client, volumeApplianceID)
+	if err != nil {
+		log.Error("error unable to get NVMeTCP targets from array", err)
+	}
+	for i, t := range nvmetcpTargetInfo {
+		publishContext[fmt.Sprintf("%s%d", common.PublishContextNVMETCPPortalsPrefix, i)] = t.Portal
+		publishContext[fmt.Sprintf("%s%d", common.PublishContextNVMETCPTargetsPrefix, i)] = t.Target
+	}
+
 	// If the system is not capable of any protocol, then we will throw the error
 	if len(iscsiTargetsInfo) == 0 && len(fcTargetsInfo) == 0 && len(nvmefcTargetInfo) == 0 {
 		return errors.New("unable to get targets for any protocol")
@@ -274,12 +283,12 @@ func (n *NfsPublisher) Publish(ctx context.Context, req *csi.ControllerPublishVo
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "failure getting nas %s", err.Error())
 	}
-	fileInterface, err := client.GetFileInterface(ctx, nas.CurrentPreferredIPv4InterfaceId)
+	fileInterface, err := client.GetFileInterface(ctx, nas.CurrentPreferredIPv4InterfaceID)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "failure getting file interface %s", err.Error())
 	}
 	publishContext[KeyNasName] = nas.Name // we need to pass that to node part of the driver
-	publishContext[common.KeyNfsExportPath] = fileInterface.IpAddress + ":/" + export.Name
+	publishContext[common.KeyNfsExportPath] = fileInterface.IPAddress + ":/" + export.Name
 	publishContext[common.KeyHostIP] = ipWithNat[0]
 	if n.ExternalAccess != "" {
 		parsedExternalAccess, _ := common.GetIPListWithMaskFromString(n.ExternalAccess)
diff --git a/pkg/node/node.go b/pkg/node/node.go
index 5ea61860..e0a173f0 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -253,13 +253,6 @@ func (s *Service) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeR
 		return nil, status.Error(codes.InvalidArgument, "staging target path is required")
 	}
 
-	nvmeIP := strings.Split(req.PublishContext["PORTAL0"], ":")
-	nvmeTargets, _ := s.nvmeLib.DiscoverNVMeTCPTargets(nvmeIP[0], false)
-	for i, t := range nvmeTargets {
-		req.PublishContext[fmt.Sprintf("%s%d", common.PublishContextNVMETCPTargetsPrefix, i)] = t.TargetNqn
-		req.PublishContext[fmt.Sprintf("%s%d", common.PublishContextNVMETCPPortalsPrefix, i)] = t.Portal + ":4420"
-	}
-
 	id, arrayID, protocol, _ := array.ParseVolumeID(ctx, id, s.DefaultArray(), req.VolumeCapability)
 
 	var stager VolumeStager
@@ -1082,7 +1075,8 @@ func (s *Service) NodeGetInfo(ctx context.Context, _ *csi.NodeGetInfoRequest) (*
 				resp.AccessibleTopology.Segments[common.Name+"/"+arr.GetIP()+"-nvmefc"] = "true"
 			}
 		} else {
-			infoList, err := common.GetISCSITargetsInfoFromStorage(arr.GetClient(), "")
+			// use NVMe/TCP
+			infoList, err := common.GetNVMETCPTargetsInfoFromStorage(arr.GetClient(), "")
 			if err != nil {
 				log.Errorf("couldn't get targets from array: %s", err.Error())
 				continue
diff --git a/pkg/node/node_connectivity_checker.go b/pkg/node/node_connectivity_checker.go
index cbf34341..5ba63c11 100644
--- a/pkg/node/node_connectivity_checker.go
+++ b/pkg/node/node_connectivity_checker.go
@@ -306,7 +306,8 @@ func (s *Service) populateTargetsInCache(array *array.PowerStoreArray) {
 			break
 		}
 	} else {
-		infoList, err := common.GetISCSITargetsInfoFromStorage(array.GetClient(), "")
+		// for NVMeTCP
+		infoList, err :=
common.GetNVMETCPTargetsInfoFromStorage(array.GetClient(), "") if err != nil { log.Errorf("couldn't get targets from array: %s", err.Error()) return From 54eaddb1be60ad71948b49957bc5822292dbdbce Mon Sep 17 00:00:00 2001 From: Utkarsh Dubey Date: Thu, 18 Jan 2024 16:01:19 +0530 Subject: [PATCH 2/4] Corrects params as per lint fix --- pkg/controller/controller.go | 4 +- pkg/controller/controller_test.go | 62 ++++---- pkg/controller/creator.go | 2 +- pkg/controller/replication.go | 48 +++--- pkg/controller/replication_test.go | 50 +++--- pkg/controller/snapshotter.go | 2 +- pkg/node/acl.go | 4 +- pkg/node/acl_test.go | 26 ++-- pkg/node/node_test.go | 240 ++++++++++++++--------------- 9 files changed, 219 insertions(+), 219 deletions(-) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 26badc8e..74fc45e5 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -746,11 +746,11 @@ func GetServiceTag(ctx context.Context, req *csi.CreateVolumeRequest, arr *array if err != nil { log.Warn("Received error while calling GetNAS ", err.Error()) } - if nas.CurrentNodeId == "" { + if nas.CurrentNodeID == "" { log.Warn("Unable to fetch the CurrentNodeId from the nas server") } else { // Removing "-node-X" from the end of CurrentNodeId to get Appliance Name - applianceName = strings.Split(nas.CurrentNodeId, "-node-")[0] + applianceName = strings.Split(nas.CurrentNodeID, "-node-")[0] // Fetching appliance information using the appliance name ap, err = arr.Client.GetApplianceByName(ctx, applianceName) if err != nil { diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 5f07c4c6..fd5cc383 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -472,7 +472,7 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest("my-vol", validVolSize) @@ -507,7 +507,7 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest("my-vol", validVolSize) @@ -567,9 +567,9 @@ var _ = 
ginkgo.Describe("CSIControllerService", func() { ginkgo.It("should successfully create nfs volume with storage class NFS acls in volume response", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) ctrlSvc.Arrays()[secondValidID].NfsAcls = "A::GROUP@:RWX" @@ -607,9 +607,9 @@ var _ = ginkgo.Describe("CSIControllerService", func() { ginkgo.It("should successfully create nfs volume with array config NFS acls in volume response", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) ctrlSvc.Arrays()[secondValidID].NfsAcls = "A::GROUP@:RWX" @@ -646,9 +646,9 @@ var _ = ginkgo.Describe("CSIControllerService", func() { ginkgo.It("should successfully create nfs volume with default NFS acls in volume response", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), 
mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest("my-vol", validVolSize) @@ -683,9 +683,9 @@ var _ = ginkgo.Describe("CSIControllerService", func() { ginkgo.It("should successfully create nfs volume with empty NFS acls in volume response", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest("my-vol", validVolSize) @@ -747,7 +747,7 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, nil) clientMock.On("CreateFS", mock.Anything, mock.Anything).Return(gopowerstore.CreateResponse{ID: validBaseVolID}, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest("my-vol", validVolSize) @@ -843,7 +843,7 @@ var _ = ginkgo.Describe("CSIControllerService", func() { SizeTotal: validVolSize, }, nil) clientMock.On("GetFS", context.Background(), mock.Anything).Return(gopowerstore.FileSystem{NasServerID: validNasID}, nil) - clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeId: validNodeID}, nil) + clientMock.On("GetNAS", context.Background(), mock.Anything).Return(gopowerstore.NAS{CurrentNodeID: validNodeID}, nil) clientMock.On("GetApplianceByName", context.Background(), mock.Anything).Return(gopowerstore.ApplianceInstance{ServiceTag: validServiceTag}, nil) req := getTypicalCreateVolumeNFSRequest(volName, validVolSize) @@ -1945,11 +1945,11 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNAS", mock.Anything, nasID). Return(gopowerstore.NAS{ Name: validNasName, - CurrentPreferredIPv4InterfaceId: interfaceID, + CurrentPreferredIPv4InterfaceID: interfaceID, }, nil) clientMock.On("GetFileInterface", mock.Anything, interfaceID). 
- Return(gopowerstore.FileInterface{IpAddress: secondValidID}, nil) + Return(gopowerstore.FileInterface{IPAddress: secondValidID}, nil) req := getTypicalControllerPublishVolumeRequest("multiple-writer", validNodeID, validNfsVolumeID) req.VolumeCapability = getVolumeCapabilityNFS() @@ -2009,11 +2009,11 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNAS", mock.Anything, nasID). Return(gopowerstore.NAS{ Name: validNasName, - CurrentPreferredIPv4InterfaceId: interfaceID, + CurrentPreferredIPv4InterfaceID: interfaceID, }, nil) clientMock.On("GetFileInterface", mock.Anything, interfaceID). - Return(gopowerstore.FileInterface{IpAddress: secondValidID}, nil) + Return(gopowerstore.FileInterface{IPAddress: secondValidID}, nil) req := getTypicalControllerPublishVolumeRequest("multiple-writer", validNodeID, validNfsVolumeID) req.VolumeCapability = getVolumeCapabilityNFS() @@ -2138,11 +2138,11 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetNAS", mock.Anything, nasID). Return(gopowerstore.NAS{ Name: validNasName, - CurrentPreferredIPv4InterfaceId: interfaceID, + CurrentPreferredIPv4InterfaceID: interfaceID, }, nil) clientMock.On("GetFileInterface", mock.Anything, interfaceID). - Return(gopowerstore.FileInterface{IpAddress: secondValidID}, nil) + Return(gopowerstore.FileInterface{IPAddress: secondValidID}, nil) req := getTypicalControllerPublishVolumeRequest("multi-writer", validNodeID, validNfsVolumeID) req.VolumeCapability = getVolumeCapabilityNFS() @@ -3700,12 +3700,12 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, validGroupID). Return(gopowerstore.ReplicationSession{ - RemoteSystemId: validRemoteSystemID, - LocalResourceId: validGroupID, - RemoteResourceId: validRemoteGroupID, + RemoteSystemID: validRemoteSystemID, + LocalResourceID: validGroupID, + RemoteResourceID: validRemoteGroupID, StorageElementPairs: []gopowerstore.StorageElementPair{{ - LocalStorageElementId: validBaseVolID, - RemoteStorageElementId: validRemoteVolID, + LocalStorageElementID: validBaseVolID, + RemoteStorageElementID: validRemoteVolID, }}, }, nil) @@ -3791,13 +3791,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, validGroupID). Return(gopowerstore.ReplicationSession{ - LocalResourceId: validGroupID, - RemoteResourceId: validRemoteGroupID, - RemoteSystemId: validRemoteSystemID, + LocalResourceID: validGroupID, + RemoteResourceID: validRemoteGroupID, + RemoteSystemID: validRemoteSystemID, StorageElementPairs: []gopowerstore.StorageElementPair{ { - LocalStorageElementId: validBaseVolID, - RemoteStorageElementId: validRemoteVolID, + LocalStorageElementID: validBaseVolID, + RemoteStorageElementID: validRemoteVolID, }, }, }, nil) @@ -3877,9 +3877,9 @@ var _ = ginkgo.Describe("CSIControllerService", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, validGroupID). 
Return(gopowerstore.ReplicationSession{ - LocalResourceId: validGroupID, - RemoteResourceId: validRemoteGroupID, - RemoteSystemId: validRemoteSystemID, + LocalResourceID: validGroupID, + RemoteResourceID: validRemoteGroupID, + RemoteSystemID: validRemoteSystemID, StorageElementPairs: []gopowerstore.StorageElementPair{}, }, nil) diff --git a/pkg/controller/creator.go b/pkg/controller/creator.go index e77c760b..37fc349c 100644 --- a/pkg/controller/creator.go +++ b/pkg/controller/creator.go @@ -168,7 +168,7 @@ func setNFSCreateAttributes(reqParams map[string]string, createParams *gopowerst } } if protectionPolicyID, ok := reqParams[common.KeyProtectionPolicyID]; ok { - createParams.ProtectionPolicyId = protectionPolicyID + createParams.ProtectionPolicyID = protectionPolicyID } if fileEventsPublishingMode, ok := reqParams[common.KeyFileEventsPublishingMode]; ok { createParams.FileEventsPublishingMode = fileEventsPublishingMode diff --git a/pkg/controller/replication.go b/pkg/controller/replication.go index af2f10be..61dce057 100644 --- a/pkg/controller/replication.go +++ b/pkg/controller/replication.go @@ -66,8 +66,8 @@ func (s *Service) CreateRemoteVolume(ctx context.Context, var remoteVolumeID string for _, sp := range rs.StorageElementPairs { - if sp.LocalStorageElementId == id { - remoteVolumeID = sp.RemoteStorageElementId + if sp.LocalStorageElementID == id { + remoteVolumeID = sp.RemoteStorageElementID } } @@ -83,7 +83,7 @@ func (s *Service) CreateRemoteVolume(ctx context.Context, if err != nil { return nil, err } - remoteSystem, err := arr.Client.GetRemoteSystem(ctx, rs.RemoteSystemId) + remoteSystem, err := arr.Client.GetRemoteSystem(ctx, rs.RemoteSystemID) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func (s *Service) CreateStorageProtectionGroup(ctx context.Context, return nil, err } - remoteSystem, err := arr.Client.GetRemoteSystem(ctx, rs.RemoteSystemId) + remoteSystem, err := arr.Client.GetRemoteSystem(ctx, rs.RemoteSystemID) if err != nil { return nil, err } @@ -167,8 +167,8 @@ func (s *Service) CreateStorageProtectionGroup(ctx context.Context, } return &csiext.CreateStorageProtectionGroupResponse{ - LocalProtectionGroupId: rs.LocalResourceId, - RemoteProtectionGroupId: rs.RemoteResourceId, + LocalProtectionGroupId: rs.LocalResourceID, + RemoteProtectionGroupId: rs.RemoteResourceID, LocalProtectionGroupAttributes: localParams, RemoteProtectionGroupAttributes: remoteParams, }, nil @@ -340,19 +340,19 @@ func (s *Service) ExecuteAction(ctx context.Context, var params *gopowerstore.FailoverParams switch action { case csiext.ActionTypes_FAILOVER_REMOTE.String(): - execAction = gopowerstore.RS_ACTION_FAILOVER + execAction = gopowerstore.RsActionFailover params = &gopowerstore.FailoverParams{IsPlanned: true, Reverse: false} case csiext.ActionTypes_UNPLANNED_FAILOVER_LOCAL.String(): - execAction = gopowerstore.RS_ACTION_FAILOVER + execAction = gopowerstore.RsActionFailover params = &gopowerstore.FailoverParams{IsPlanned: false, Reverse: false} case csiext.ActionTypes_SUSPEND.String(): - execAction = gopowerstore.RS_ACTION_PAUSE + execAction = gopowerstore.RsActionPause case csiext.ActionTypes_RESUME.String(): - execAction = gopowerstore.RS_ACTION_RESUME + execAction = gopowerstore.RsActionResume case csiext.ActionTypes_SYNC.String(): - execAction = gopowerstore.RS_ACTION_SYNC + execAction = gopowerstore.RsActionSync case csiext.ActionTypes_REPROTECT_LOCAL.String(): - execAction = gopowerstore.RS_ACTION_REPROTECT + execAction = gopowerstore.RsActionReprotect default: return nil, 
status.Errorf(codes.Unknown, "The requested action does not match with supported actions") } @@ -409,22 +409,22 @@ func validateRSState(session *gopowerstore.ReplicationSession, action gopowersto state := session.State log.Infof("replication session is in %s", state) switch action { - case gopowerstore.RS_ACTION_RESUME: + case gopowerstore.RsActionResume: if state == "OK" { log.Infof("RS (%s) is already in desired state: (%s)", session.ID, state) return true, false, nil } - case gopowerstore.RS_ACTION_REPROTECT: + case gopowerstore.RsActionReprotect: if state == "OK" { log.Infof("RS (%s) is already in desired state: (%s)", session.ID, state) return true, false, nil } - case gopowerstore.RS_ACTION_PAUSE: + case gopowerstore.RsActionPause: if state == "Paused" || state == "Paused_For_Migration" || state == "Paused_For_NDU" { log.Infof("RS (%s) is already in desired state: (%s)", session.ID, state) return true, false, nil } - case gopowerstore.RS_ACTION_FAILOVER: + case gopowerstore.RsActionFailover: if state == "Failing_Over" { return false, false, nil } @@ -466,7 +466,7 @@ func (s *Service) DeleteStorageProtectionGroup(ctx context.Context, if vg.ID != "" { if vg.ProtectionPolicyID != "" { _, err := arr.GetClient().ModifyVolumeGroup(ctx, &gopowerstore.VolumeGroupModify{ - ProtectionPolicyId: "", + ProtectionPolicyID: "", }, groupID) if apiErr, ok := err.(gopowerstore.APIError); ok && !apiErr.NotFound() { return nil, status.Errorf(codes.Internal, "Error: Unable to un-assign PP from Volume Group") @@ -601,21 +601,21 @@ func (s *Service) GetStorageProtectionGroupStatus(ctx context.Context, var state csiext.StorageProtectionGroupStatus_State switch rs.State { - case gopowerstore.RS_STATE_OK: + case gopowerstore.RsStateOk: state = csiext.StorageProtectionGroupStatus_SYNCHRONIZED break - case gopowerstore.RS_STATE_FAILED_OVER: + case gopowerstore.RsStateFailedOver: state = csiext.StorageProtectionGroupStatus_FAILEDOVER break - case gopowerstore.RS_STATE_PAUSED, gopowerstore.RS_STATE_PAUSED_FOR_MIGRATION, gopowerstore.RS_STATE_PAUSED_FOR_NDU, gopowerstore.RS_STATE_SYSTEM_PAUSED: + case gopowerstore.RsStatePaused, gopowerstore.RsStatePausedForMigration, gopowerstore.RsStatePausedForNdu, gopowerstore.RsStateSystemPaused: state = csiext.StorageProtectionGroupStatus_SUSPENDED break - case gopowerstore.RS_STATE_FAILING_OVER, gopowerstore.RS_STATE_FAILING_OVER_FOR_DR, gopowerstore.RS_STATE_RESUMING, - gopowerstore.RS_STATE_REPROTECTING, gopowerstore.RS_STATE_PARTIAL_CUTOVER_FOR_MIGRATION, gopowerstore.RS_STATE_SYNCHRONIZING, - gopowerstore.RS_STATE_INITIALIZING: + case gopowerstore.RsStateFailingOver, gopowerstore.RsStateFailingOverForDR, gopowerstore.RsStateResuming, + gopowerstore.RsStateReprotecting, gopowerstore.RsStatePartialCutoverForMigration, gopowerstore.RsStateSynchronizing, + gopowerstore.RsStateInitializing: state = csiext.StorageProtectionGroupStatus_SYNC_IN_PROGRESS break - case gopowerstore.RS_STATE_ERROR: + case gopowerstore.RsStateError: state = csiext.StorageProtectionGroupStatus_INVALID break default: diff --git a/pkg/controller/replication_test.go b/pkg/controller/replication_test.go index 712587cf..a9881f51 100755 --- a/pkg/controller/replication_test.go +++ b/pkg/controller/replication_test.go @@ -8,7 +8,7 @@ * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * @@ -41,7 +41,7 @@ var _ = ginkgo.Describe("Replication", func() { ginkgo.When("getting storage protection group status and state is ok", func() { ginkgo.It("should return synchronized status", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_OK}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateOk}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -59,7 +59,7 @@ var _ = ginkgo.Describe("Replication", func() { ginkgo.When("getting storage protection group status and state is failed over", func() { ginkgo.It("should return failed over status", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_FAILED_OVER}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateFailedOver}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -77,7 +77,7 @@ var _ = ginkgo.Describe("Replication", func() { ginkgo.When("getting storage protection group status and state is paused (for several reasons)", func() { ginkgo.It("should return suspended status (if paused)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_PAUSED}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStatePaused}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -92,7 +92,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return suspended status (if paused for migration)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_PAUSED_FOR_MIGRATION}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStatePausedForMigration}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -107,7 +107,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return suspended status (if paused for NDU)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_PAUSED_FOR_NDU}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStatePausedForNdu}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -122,7 +122,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return suspended status (if system paused)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_SYSTEM_PAUSED}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateSystemPaused}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -140,7 +140,7 @@ var _ = ginkgo.Describe("Replication", func() { ginkgo.When("getting storage protection group status and state is updating (in progress)", func() { ginkgo.It("should return 'sync in 
progress' status (if failing over)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_FAILING_OVER}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateFailingOver}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -155,7 +155,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if failing over for DR)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_FAILING_OVER_FOR_DR}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateFailingOverForDR}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -170,7 +170,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if resuming)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_RESUMING}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateResuming}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -185,7 +185,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if reprotecting)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_REPROTECTING}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateReprotecting}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -200,7 +200,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if cutover for migration)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_PARTIAL_CUTOVER_FOR_MIGRATION}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStatePartialCutoverForMigration}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -215,7 +215,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if synchronizing)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_SYNCHRONIZING}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateSynchronizing}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -230,7 +230,7 @@ var _ = ginkgo.Describe("Replication", func() { }) ginkgo.It("should return 'sync in progress' status (if initializing)", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_INITIALIZING}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateInitializing}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -248,7 +248,7 @@ var _ = ginkgo.Describe("Replication", func() { ginkgo.When("getting storage 
protection group status and state is error", func() { ginkgo.It("should return invalid status", func() { clientMock.On("GetReplicationSessionByLocalResourceID", mock.Anything, mock.Anything).Return( - gopowerstore.ReplicationSession{State: gopowerstore.RS_STATE_ERROR}, nil) + gopowerstore.ReplicationSession{State: gopowerstore.RsStateError}, nil) req := new(csiext.GetStorageProtectionGroupStatusRequest) params := make(map[string]string) @@ -330,11 +330,11 @@ var _ = ginkgo.Describe("Replication", func() { }) }) ginkgo.Describe("calling ExecuteAction()", func() { - ginkgo.When("action is RS_ACTION_RESUME and state is OK", func() { + ginkgo.When("action is RsActionResume and state is OK", func() { ginkgo.It("return nil", func() { clientMock.On("ExecuteActionOnReplicationSession", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil) session := gopowerstore.ReplicationSession{ID: "test", State: "OK"} - action := gopowerstore.RS_ACTION_RESUME + action := gopowerstore.RsActionResume failoverParams := gopowerstore.FailoverParams{} err := controller.ExecuteAction(&session, clientMock, action, &failoverParams) @@ -342,11 +342,11 @@ var _ = ginkgo.Describe("Replication", func() { }) }) - ginkgo.When("action is RS_ACTION_REPROTECT and state is not OK", func() { + ginkgo.When("action is RsActionReprotect and state is not OK", func() { ginkgo.It("return nil", func() { clientMock.On("ExecuteActionOnReplicationSession", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil) session := gopowerstore.ReplicationSession{ID: "test", State: "OK"} - action := gopowerstore.RS_ACTION_REPROTECT + action := gopowerstore.RsActionReprotect failoverParams := gopowerstore.FailoverParams{} err := controller.ExecuteAction(&session, clientMock, action, &failoverParams) @@ -354,11 +354,11 @@ var _ = ginkgo.Describe("Replication", func() { }) }) - ginkgo.When("action is RS_ACTION_PAUSE and state is Paused", func() { + ginkgo.When("action is RsActionPause and state is Paused", func() { ginkgo.It("return nil", func() { clientMock.On("ExecuteActionOnReplicationSession", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil) session := gopowerstore.ReplicationSession{ID: "test", State: "Paused"} - action := gopowerstore.RS_ACTION_PAUSE + action := gopowerstore.RsActionPause failoverParams := gopowerstore.FailoverParams{} err := controller.ExecuteAction(&session, clientMock, action, &failoverParams) @@ -366,11 +366,11 @@ var _ = ginkgo.Describe("Replication", func() { }) }) - ginkgo.When("action is RS_ACTION_FAILOVER and state is Failing_Over", func() { + ginkgo.When("action is RsActionFailover and state is Failing_Over", func() { ginkgo.It("return nil", func() { clientMock.On("ExecuteActionOnReplicationSession", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil) session := gopowerstore.ReplicationSession{ID: "test", State: "Failing_Over"} - action := gopowerstore.RS_ACTION_FAILOVER + action := gopowerstore.RsActionFailover failoverParams := gopowerstore.FailoverParams{} err := controller.ExecuteAction(&session, clientMock, action, &failoverParams) @@ -380,11 +380,11 @@ var _ = ginkgo.Describe("Replication", func() { }) }) - ginkgo.When("action is RS_ACTION_FAILOVER and state is Failed_Over", func() { + ginkgo.When("action is RsActionFailover and state is Failed_Over", func() { ginkgo.It("return nil", func() { clientMock.On("ExecuteActionOnReplicationSession", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil) 
session := gopowerstore.ReplicationSession{ID: "test", State: "Failed_Over"} - action := gopowerstore.RS_ACTION_FAILOVER + action := gopowerstore.RsActionFailover failoverParams := gopowerstore.FailoverParams{} err := controller.ExecuteAction(&session, clientMock, action, &failoverParams) diff --git a/pkg/controller/snapshotter.go b/pkg/controller/snapshotter.go index 487bd8ba..1cd264fd 100644 --- a/pkg/controller/snapshotter.go +++ b/pkg/controller/snapshotter.go @@ -81,7 +81,7 @@ func (f FilesystemSnapshot) GetID() string { // GetSourceID returns ID of the volume/fs that snapshot was created from func (f FilesystemSnapshot) GetSourceID() string { - return f.ParentId + return f.ParentID } // GetSize returns current size of the snapshot diff --git a/pkg/node/acl.go b/pkg/node/acl.go index 34703bd6..5f334935 100644 --- a/pkg/node/acl.go +++ b/pkg/node/acl.go @@ -92,7 +92,7 @@ func isNfsv4Enabled(ctx context.Context, client gopowerstore.Client, nasName str nfsv4Enabled := false nas, err := gopowerstore.Client.GetNASByName(client, ctx, nasName) if err == nil { - nfsServer, err := gopowerstore.Client.GetNfsServer(client, ctx, nas.NfsServers[0].Id) + nfsServer, err := gopowerstore.Client.GetNfsServer(client, ctx, nas.NfsServers[0].ID) if err == nil { if nfsServer.IsNFSv4Enabled { nfsv4Enabled = true @@ -100,7 +100,7 @@ func isNfsv4Enabled(ctx context.Context, client gopowerstore.Client, nasName str log.Error(fmt.Sprintf("NFS v4 not enabled on NAS server: %s\n", nasName)) } } else { - log.Error(fmt.Sprintf("can't fetch nfs server with id %s: %s", nas.NfsServers[0].Id, err.Error())) + log.Error(fmt.Sprintf("can't fetch nfs server with id %s: %s", nas.NfsServers[0].ID, err.Error())) } } else { log.Error(fmt.Sprintf("can't determine nfsv4 enabled: %s", err.Error())) diff --git a/pkg/node/acl_test.go b/pkg/node/acl_test.go index f2cedd9e..4898669f 100644 --- a/pkg/node/acl_test.go +++ b/pkg/node/acl_test.go @@ -67,13 +67,13 @@ func TestNfsv4NasServer_Success(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) isNFSv4Enabled := isNfsv4Enabled(context.Background(), clientMock, validNasName) expected := true @@ -86,7 +86,7 @@ func TestNfsv4NasServer_Err_GetNASByName(t *testing.T) { clientMock = new(gopowerstoremock.Client) clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID}, errors.New("GetNASByName_fail")) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) isNFSv4Enabled := isNfsv4Enabled(context.Background(), clientMock, validNasName) expected := false @@ -100,13 +100,13 @@ func TestNfsv4NasServer_Err_GetNfsServer(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } clientMock.On("GetNASByName", mock.Anything, 
validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, errors.New("GetNfsServer_fail")) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, errors.New("GetNfsServer_fail")) isNFSv4Enabled := isNfsv4Enabled(context.Background(), clientMock, validNasName) expected := false @@ -120,13 +120,13 @@ func TestNfsv4NasServer_Fail(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: false}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: false}, nil) isNFSv4Enabled := isNfsv4Enabled(context.Background(), clientMock, validNasName) expected := false @@ -141,14 +141,14 @@ func TestValidateAndSetNfsACLs_Success_nfsv4Acls(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil) clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) aclConfigured, err := validateAndSetACLs(context.Background(), nfsv4ACLsMock, validNasName, clientMock, "A::OWNER@:RWX", "dir2") @@ -163,13 +163,13 @@ func TestValidateAndSetNfsACLs_Fail_InvalidAcls(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil) + clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil) nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil) aclConfigured, err := validateAndSetACLs(context.Background(), nfsv4ACLsMock, validNasName, clientMock, "abcd", "dir1") @@ -185,13 +185,13 @@ func TestValidateAndSetNfsACLs_Fail_GetNfsServerFail(t *testing.T) { nfsServers := []gopowerstore.NFSServerInstance{ { - Id: validNfsServerID, + ID: validNfsServerID, IsNFSv4Enabled: true, }, } clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil) - clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, errors.New("GetNfsServer_fail")) + clientMock.On("GetNfsServer", mock.Anything, 
mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, errors.New("GetNfsServer_fail"))
 	nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil)
 
 	aclConfigured, err := validateAndSetACLs(context.Background(), nfsv4ACLsMock, validNasName, clientMock, "A::OWNER@:RWX", "dir1")
diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go
index 9c28d061..d3058009 100644
--- a/pkg/node/node_test.go
+++ b/pkg/node/node_test.go
@@ -974,7 +974,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			scsiStageVolumeOK(utilMock, fsMock)
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -996,7 +996,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			scsiStageVolumeOK(utilMock, fsMock)
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1017,7 +1017,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			scsiStageVolumeOK(utilMock, fsMock)
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1043,7 +1043,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			publishContext["NfsExportPath"] = validNfsExportPath
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validNfsVolumeID,
 				PublishContext:    publishContext,
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1075,17 +1075,17 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 
 			nfsServers := []gopowerstore.NFSServerInstance{
 				{
-					Id:             validNfsServerID,
+					ID:             validNfsServerID,
 					IsNFSv4Enabled: true,
 				},
 			}
 
-			clientMock.On("GetNfsServer", mock.Anything, validNasName).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil)
+			clientMock.On("GetNfsServer", mock.Anything, validNasName).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil)
 			clientMock.On("GetNASByName", mock.Anything, validNasName).Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil)
 			nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil)
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validNfsVolumeID,
 				PublishContext:    publishContext,
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
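The ACE strings exercised in these cases ("A::OWNER@:RWX", and "abcd" in the negative test) follow the NFSv4 format of four colon-separated fields: type, flags, principal, permissions. A rough standalone checker, with a deliberately loose regex for illustration rather than the driver's actual validation logic:

package main

import (
	"fmt"
	"regexp"
)

// nfsv4ACEPattern loosely matches one NFSv4 ACE of the form
// type:flags:principal:permissions, e.g. "A::OWNER@:RWX".
// Illustrative only; real validation is stricter.
var nfsv4ACEPattern = regexp.MustCompile(`^[ADUL]:[a-zA-Z]*:[^:]+:[a-zA-Z]+$`)

func looksLikeNfsv4ACE(ace string) bool {
	return nfsv4ACEPattern.MatchString(ace)
}

func main() {
	for _, ace := range []string{"A::OWNER@:RWX", "A::GROUP@:RWX", "abcd"} {
		fmt.Printf("%-16s -> %v\n", ace, looksLikeNfsv4ACE(ace))
	}
}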
@@ -1116,17 +1116,17 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 
 			nfsServers := []gopowerstore.NFSServerInstance{
 				{
-					Id:             validNfsServerID,
+					ID:             validNfsServerID,
 					IsNFSv4Enabled: true,
 				},
 			}
 
 			nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil)
 			clientMock.On("GetNASByName", mock.Anything, "").Return(gopowerstore.NAS{ID: validNasID, NfsServers: nfsServers}, nil)
-			clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{Id: validNfsServerID, IsNFSv4Enabled: true}, nil)
+			clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil)
 
 			nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validNfsVolumeID,
 				PublishContext:    publishContext,
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1154,7 +1154,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			utilMock.On("GetDiskFormat", mock.Anything, stagingPath).Return("", nil)
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1180,7 +1180,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			utilMock.On("GetDiskFormat", mock.Anything, stagingPath).Return("", nil)
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validNfsVolumeID,
 				PublishContext:    publishContext,
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1202,7 +1202,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 		})
 	})
 
 	ginkgo.When("missing volume VolumeId", func() {
 		ginkgo.It("should fail", func() {
 			req := &csi.NodeStageVolumeRequest{
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype("mount", "single-writer", "ext4"),
@@ -1220,7 +1220,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 		ginkgo.It("should fail", func() {
 			req := &csi.NodeStageVolumeRequest{
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype("mount", "single-writer", "ext4"),
 				VolumeId:         validBlockVolumeID,
 			}
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), req)
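One naming detail matters throughout these hunks: csi.NodeStageVolumeRequest and the other request structs are generated by protoc-gen-go from the CSI spec, which spells the field VolumeId. Initialism-style renames (Id to ID) therefore only apply to types this repository owns; generated and third-party identifiers keep their names. A small compilable sketch of that boundary:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// driverOwnedVolume is our own type, so Go initialism style (ID) applies here.
type driverOwnedVolume struct {
	VolumeID string // lint-correct for code we own
}

func main() {
	// Generated protobuf code keeps protoc-gen-go's spelling: VolumeId.
	req := &csi.NodeStageVolumeRequest{
		VolumeId:          "vol-1",
		StagingTargetPath: "/var/lib/kubelet/plugins/stage",
	}
	own := driverOwnedVolume{VolumeID: req.VolumeId}
	fmt.Println(own.VolumeID, req.GetVolumeId())
}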
@@ -1251,7 +1251,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(4)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).
 				Return(mountInfo, nil).Twice()
 			fsMock.On("MkFileIdempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).
 				Return(true, nil).Once()
 			fsMock.On("GetUtil").Return(utilMock)
@@ -1264,7 +1264,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("Remove", stagingPath).Return(nil).Once()
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1282,7 +1282,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsDeviceOrResourceBusy", e).Return(false)
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1298,7 +1298,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 	ginkgo.When("publish context is incorrect", func() {
 		ginkgo.It("should fail [deviceWWN]", func() {
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    map[string]string{},
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1311,7 +1311,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 
 		ginkgo.It("should fail [volumeLUNAddress]", func() {
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId: validBlockVolumeID,
 				PublishContext: map[string]string{
 					common.PublishContextDeviceWWN: validDeviceWWN,
 				},
@@ -1326,7 +1326,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 
 		ginkgo.It("should fail [iscsiTargets]", func() {
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId: validBlockVolumeID,
 				PublishContext: map[string]string{
 					common.PublishContextDeviceWWN:  validDeviceWWN,
 					common.PublishContextLUNAddress: validLUNID,
 				},
@@ -1344,7 +1344,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			nodeSvc.useNVME = true
 			nodeSvc.useFC = true
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId: validBlockVolumeID,
 				PublishContext: map[string]string{
 					common.PublishContextDeviceWWN:  validDeviceWWN,
 					common.PublishContextLUNAddress: validLUNID,
@@ -1361,7 +1361,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 		ginkgo.It("should fail [fcTargets]", func() {
 			nodeSvc.useFC = true
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId: validBlockVolumeID,
 				PublishContext: map[string]string{
 					common.PublishContextDeviceWWN:  validDeviceWWN,
 					common.PublishContextLUNAddress: validLUNID,
@@ -1386,7 +1386,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			scsiStageVolumeOK(utilMock, fsMock)
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1407,13 +1407,13 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			}).Return(gobrick.Device{}, nil)
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
 			fsMock.On("MkFileIdempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(true, nil)
 			fsMock.On("GetUtil").Return(utilMock)
 
 			utilMock.On("BindMount", mock.Anything, "/dev", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(e)
 
 			res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				PublishContext:    getValidPublishContext(),
 				StagingTargetPath: nodeStagePrivateDir,
 				VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1453,7 +1453,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err).To(gomega.BeNil())
@@ -1482,7 +1482,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			_, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: "",
 			})
 			gomega.Expect(err.Error()).To(gomega.ContainSubstring("staging target path is required"))
@@ -1510,7 +1510,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			_, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not reliably determine existing mount for path"))
@@ -1538,7 +1538,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			_, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not unmount de"))
@@ -1566,7 +1566,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err).To(gomega.BeNil())
@@ -1596,7 +1596,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err).To(gomega.BeNil())
@@ -1627,7 +1627,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("IsNotExist", mock.Anything).Return(false)
 
 			res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{
 				VolumeId:          validBlockVolumeID,
 				StagingTargetPath: nodeStagePrivateDir,
 			})
 			gomega.Expect(err).To(gomega.BeNil())
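All of these unstage cases follow the same testify idiom: expectations are registered with On/Return before the call under test, and call counts are pinned with Once or Times. A tiny self-contained illustration of the mechanics (the Storage interface is invented for the demo and is not a driver type):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Storage is a stand-in interface for the demo, not a driver type.
type Storage interface {
	Remove(path string) error
}

// MockStorage embeds mock.Mock the same way the generated fsMock does.
type MockStorage struct {
	mock.Mock
}

func (m *MockStorage) Remove(path string) error {
	args := m.Called(path)
	return args.Error(0)
}

func main() {
	m := new(MockStorage)
	// Expect exactly one Remove call for this path, returning nil.
	m.On("Remove", "/stage/vol-1").Return(nil).Once()

	var s Storage = m
	fmt.Println(s.Remove("/stage/vol-1")) // <nil>; an unexpected second call would panic
}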
res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1689,7 +1689,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Remove", stagingPath).Return(nil) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1713,7 +1713,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1736,7 +1736,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1758,7 +1758,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "ext4", "ro").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1780,7 +1780,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1802,7 +1802,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1824,7 +1824,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "ext4").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1846,7 +1846,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1862,11 +1862,11 @@ var _ = 
ginkgo.Describe("CSINodeService", func() { fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil) + fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil) utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1883,11 +1883,11 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil) + fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil) utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1903,11 +1903,11 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", validTargetPath).Return(false, errors.New("failed")) + fsMock.On("MkFileIDempotent", validTargetPath).Return(false, errors.New("failed")) utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1923,11 +1923,11 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil) + fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil) utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(errors.New("failed to bind")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1949,7 +1949,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1963,7 +1963,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No volume ID specified", func() { ginkgo.It("should fail", func() { res, err := 
nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: "", + VolumeID: "", PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1977,7 +1977,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: "", @@ -1991,7 +1991,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("Invalid volume capabilities specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2005,7 +2005,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No staging target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: "", TargetPath: validTargetPath, @@ -2028,7 +2028,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2050,7 +2050,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2073,7 +2073,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(errors.New("bind failed")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2103,7 +2103,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, validTargetPath).Return(nil) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err).To(gomega.BeNil()) @@ -2127,7 +2127,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, validTargetPath).Return(nil) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validNfsVolumeID, + VolumeID: validNfsVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err).To(gomega.BeNil()) @@ -2137,7 +2137,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { 
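// The next specs exercise CSI argument validation in NodeUnpublishVolume:
// an empty TargetPath and an empty VolumeId should each be rejected with an
// InvalidArgument status before any unmount work is attempted.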
ginkgo.When("No target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: "", }) gomega.Expect(err.Error()).To(gomega.Equal("rpc error: code = InvalidArgument desc = target path required")) @@ -2147,7 +2147,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("Unable to get volID", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: "", + VolumeID: "", TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.Equal("rpc error: code = InvalidArgument desc = volume ID is required")) @@ -2161,7 +2161,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return(nil, errors.New("error")) fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not reliably determine existing mount status")) @@ -2184,7 +2184,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) utilMock.On("Unmount", mock.Anything, validTargetPath).Return(errors.New("Unmount failed")) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not unmount dev")) @@ -2408,7 +2408,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { }, nil) _, err := nodeSvc.NodeExpandVolume(context.Background(), &csi.NodeExpandVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, VolumePath: "", CapacityRange: &csi.CapacityRange{ RequiredBytes: 2234234, @@ -2431,7 +2431,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { }, errors.New("err")).Times(1) _, err := nodeSvc.NodeExpandVolume(context.Background(), &csi.NodeExpandVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, VolumePath: validTargetPath, CapacityRange: &csi.CapacityRange{ RequiredBytes: 2234234, @@ -2615,7 +2615,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2641,14 +2641,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil) fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", mock.Anything).Return(true, nil) + fsMock.On("MkFileIDempotent", mock.Anything).Return(true, nil) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil) utilMock.On("GetDiskFormat", 
mock.Anything, mock.Anything).Return("", nil) fsMock.On("ExecCommand", "mkfs.ext4", "-E", "nodiscard", "-F", mock.Anything).Return([]byte{}, nil) utilMock.On("Mount", mock.Anything, mock.Anything, mock.Anything, "ext4").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2681,15 +2681,15 @@ var _ = ginkgo.Describe("CSINodeService", func() { }).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, nil) ctrlMock.On("ControllerPublishVolume", mock.Anything, &csi.ControllerPublishVolumeRequest{ - VolumeId: validBlockVolumeID, - NodeId: validNodeID, + VolumeID: validBlockVolumeID, + NodeID: validNodeID, VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2721,7 +2721,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, mock.Anything).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2744,7 +2744,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2770,7 +2770,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil) fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", mock.Anything).Return(true, errors.New("error")) + fsMock.On("MkFileIDempotent", mock.Anything).Return(true, errors.New("error")) fsMock.On("GetUtil").Return(utilMock) mountInfo := []gofsutil.Info{ @@ -2788,7 +2788,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, mock.Anything).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2807,7 +2807,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, nil) fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil).Times(2) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2826,7 +2826,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) 
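// Stat reports os.ErrNotExist for the target directory here, so the node
// service has to create it first; MkdirAll is forced to fail below to drive
// the inline-ephemeral error path.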
fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(errors.New("err")).Times(2) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2849,14 +2849,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, errors.New("Failed")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2879,14 +2879,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2909,14 +2909,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2940,7 +2940,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2966,14 +2966,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil) fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) - fsMock.On("MkFileIdempotent", mock.Anything).Return(true, nil) + fsMock.On("MkFileIDempotent", mock.Anything).Return(true, nil) fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil) fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil) utilMock.On("GetDiskFormat", mock.Anything, mock.Anything).Return("", nil) 
fsMock.On("ExecCommand", "mkfs.xfs", "-K", mock.Anything, "-m", mock.Anything).Return([]byte{}, nil) utilMock.On("Mount", mock.Anything, mock.Anything, mock.Anything, "xfs", mock.Anything).Return(errors.New("err")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -3015,14 +3015,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil) ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, - NodeId: validNodeID, + VolumeID: validBlockVolumeID, + NodeID: validNodeID, }).Return(&csi.ControllerUnpublishVolumeResponse{}, nil) ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, }).Return(&csi.DeleteVolumeResponse{}, nil) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Ω(err).To(gomega.BeNil()) @@ -3046,7 +3046,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("GetUtil").Return(utilMock) fsMock.On("ReadFile", ephemerallockfile).Return([]byte(validBlockVolumeID), os.ErrNotExist) _, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Ω(err.Error()).To(gomega.ContainSubstring("Was unable to read lockfile")) @@ -3078,14 +3078,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil) ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, - NodeId: validNodeID, + VolumeID: validBlockVolumeID, + NodeID: validNodeID, }).Return(&csi.ControllerUnpublishVolumeResponse{}, errors.New("failed")) ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, }).Return(&csi.DeleteVolumeResponse{}, nil) _, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Ω(err.Error()).To(gomega.ContainSubstring("Inline ephemeral controller unpublish")) @@ -3117,14 +3117,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil) ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, - NodeId: validNodeID, + VolumeID: validBlockVolumeID, + NodeID: validNodeID, }).Return(&csi.ControllerUnpublishVolumeResponse{}, nil) ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, }).Return(&csi.DeleteVolumeResponse{}, errors.New("failed")) _, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeId: validBlockVolumeID, + VolumeID: validBlockVolumeID, 
TargetPath: validTargetPath, }) gomega.Ω(err.Error()).To(gomega.ContainSubstring("failed")) @@ -3156,7 +3156,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3195,7 +3195,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3234,7 +3234,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3271,7 +3271,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3307,7 +3307,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3335,7 +3335,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3369,7 +3369,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3420,7 +3420,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3473,7 +3473,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) 
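// NodeGetInfo should advertise one topology segment per reachable array and
// protocol, keyed as "<driver name>/<array IP>-<protocol>"; the assertions
// below check that the "-nfs" segment is reported as "true".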
gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3526,7 +3526,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3555,7 +3555,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3586,7 +3586,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3626,7 +3626,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3666,7 +3666,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3704,7 +3704,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3738,7 +3738,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3773,7 +3773,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeId: nodeSvc.nodeID, + NodeID: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ 
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3803,7 +3803,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeId: nodeSvc.nodeID,
+				NodeID: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3992,7 +3992,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			clientMock.On("GetFS", mock.Anything, validBaseVolumeID).
 				Return(gopowerstore.FileSystem{ID: validBaseVolumeID}, nil)
-			req := &csi.NodeGetVolumeStatsRequest{VolumeId: validBlockVolumeID, VolumePath: ""}
+			req := &csi.NodeGetVolumeStatsRequest{VolumeID: validBlockVolumeID, VolumePath: ""}
 
 			res, err := nodeSvc.NodeGetVolumeStats(context.Background(), req)
 
@@ -4055,7 +4055,7 @@ func getNodeVolumeExpandValidRequest(volid string, isBlock bool) *csi.NodeExpand
 	var size int64 = controller.MaxVolumeSizeBytes / 100
 	if !isBlock {
 		req := csi.NodeExpandVolumeRequest{
-			VolumeId:   volid,
+			VolumeID:   volid,
 			VolumePath: validTargetPath,
 			CapacityRange: &csi.CapacityRange{
 				RequiredBytes: size,
@@ -4065,7 +4065,7 @@ func getNodeVolumeExpandValidRequest(volid string, isBlock bool) *csi.NodeExpand
 		return &req
 	}
 	req := csi.NodeExpandVolumeRequest{
-		VolumeId:   volid,
+		VolumeID:   volid,
 		VolumePath: validTargetPath + "/csi/volumeDevices/publish/",
 		CapacityRange: &csi.CapacityRange{
 			RequiredBytes: size,

From 05b6ca2daf63ed24a58bc2d19cf1efb2f01f598a Mon Sep 17 00:00:00 2001
From: Utkarsh Dubey
Date: Tue, 23 Jan 2024 17:16:23 +0530
Subject: [PATCH 3/4] Update gopowerstore commit ID

---
 go.mod                      |   3 +-
 go.sum                      |   4 +-
 pkg/controller/publisher.go |   2 +-
 pkg/node/node_test.go       | 212 ++++++++++++++++++------------------
 tests/e2e/go.mod            |   2 +-
 tests/e2e/go.sum            |   1 +
 6 files changed, 112 insertions(+), 112 deletions(-)

diff --git a/go.mod b/go.mod
index b0f86693..d981560e 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,6 @@ module github.com/dell/csi-powerstore/v2
 
 go 1.21
 
-replace github.com/dell/gopowerstore => ../gopowerstore
 require (
 	github.com/akutz/gosync v0.1.0
 	github.com/apparentlymart/go-cidr v1.1.0
@@ -17,7 +16,7 @@ require (
 	github.com/dell/gofsutil v1.14.0
 	github.com/dell/goiscsi v1.8.0
 	github.com/dell/gonvme v1.6.0
-	github.com/dell/gopowerstore v1.14.0
+	github.com/dell/gopowerstore v1.14.1-0.20240123112046-ec40aaf31242
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/go-openapi/strfmt v0.21.3
 	github.com/golang/mock v1.6.0
diff --git a/go.sum b/go.sum
index f310fa67..464a59e7 100644
--- a/go.sum
+++ b/go.sum
@@ -138,8 +138,8 @@ github.com/dell/goiscsi v1.8.0 h1:kocGVOdgnufc6eGpfmwP66hyhY7OVgIafaS/+uM6ogU=
 github.com/dell/goiscsi v1.8.0/go.mod h1:PTlQGJaGKYgia95mGwwHSBgvfOr3BfLIjGNh1HT6p+s=
 github.com/dell/gonvme v1.6.0 h1:Y/g0Ml8E3oSB+bqGJN1/U+V621h9t0KJeYAF5aQ7NVU=
 github.com/dell/gonvme v1.6.0/go.mod h1:/UgJAlR03LbPSDIK2BfhiYUlzyY7lAMJ6ao8eYab2Eg=
-github.com/dell/gopowerstore v1.14.0 h1:j4wSn25X2AH2t3/ySNPHmNWjqM0zHDkYyDPt+0uE8cc=
-github.com/dell/gopowerstore v1.14.0/go.mod h1:YH3SpMX2dr3ouYWWPhk5lzjip3aaVVksFOSoenRDY5w=
+github.com/dell/gopowerstore v1.14.1-0.20240123112046-ec40aaf31242 h1:kR8Opp5Rr3syF5uJnmDJNe94QrUbqmLXsjTDrENFG88=
+github.com/dell/gopowerstore v1.14.1-0.20240123112046-ec40aaf31242/go.mod h1:YH3SpMX2dr3ouYWWPhk5lzjip3aaVVksFOSoenRDY5w=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
diff --git a/pkg/controller/publisher.go b/pkg/controller/publisher.go
index e745c0f8..43116d5d 100644
--- a/pkg/controller/publisher.go
+++ b/pkg/controller/publisher.go
@@ -203,7 +203,7 @@ func (s *SCSIPublisher) addTargetsInfoToPublishContext(
 	}
 
 	// If the system is not capable of any protocol, then we will through the error
-	if len(iscsiTargetsInfo) == 0 && len(fcTargetsInfo) == 0 && len(nvmefcTargetInfo) == 0 {
+	if len(iscsiTargetsInfo) == 0 && len(fcTargetsInfo) == 0 && len(nvmefcTargetInfo) == 0 && len(nvmetcpTargetInfo) == 0 {
 		return errors.New("unable to get targets for any protocol")
 	}
 	return nil
diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go
index d3058009..30e53629 100644
--- a/pkg/node/node_test.go
+++ b/pkg/node/node_test.go
@@ -974,7 +974,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				scsiStageVolumeOK(utilMock, fsMock)
 				res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -996,7 +996,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				scsiStageVolumeOK(utilMock, fsMock)
 				res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1017,7 +1017,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				scsiStageVolumeOK(utilMock, fsMock)
 				res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1043,7 +1043,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				publishContext["NfsExportPath"] = validNfsExportPath
 				res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validNfsVolumeID,
+					VolumeId:          validNfsVolumeID,
 					PublishContext:    publishContext,
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1085,7 +1085,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				nfsv4ACLsMock.On("SetNfsv4Acls", mock.Anything, mock.Anything).Return(nil)
 				res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validNfsVolumeID,
+					VolumeId:          validNfsVolumeID,
 					PublishContext:    publishContext,
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1126,7 +1126,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				clientMock.On("GetNfsServer", mock.Anything, mock.Anything).Return(gopowerstore.NFSServerInstance{ID: validNfsServerID, IsNFSv4Enabled: true}, nil)
 				nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{
-					VolumeID:          validNfsVolumeID,
+					VolumeId:          validNfsVolumeID,
 					PublishContext:    publishContext,
 					StagingTargetPath: nodeStagePrivateDir,
 					VolumeCapability: getCapabilityWithVoltypeAccessFstype(
@@ -1154,7
+1154,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("GetDiskFormat", mock.Anything, stagingPath).Return("", nil) res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1180,7 +1180,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("GetDiskFormat", mock.Anything, stagingPath).Return("", nil) res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, PublishContext: publishContext, StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1220,7 +1220,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.It("should fail", func() { req := &csi.NodeStageVolumeRequest{ VolumeCapability: getCapabilityWithVoltypeAccessFstype("mount", "single-writer", "ext4"), - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, } res, err := nodeSvc.NodeStageVolume(context.Background(), req) @@ -1264,7 +1264,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Remove", stagingPath).Return(nil).Once() res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1282,7 +1282,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsDeviceOrResourceBusy", e).Return(false) res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1298,7 +1298,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("publish context is incorrect", func() { ginkgo.It("should fail [deviceWWN]", func() { res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: map[string]string{}, StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1311,7 +1311,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.It("should fail [volumeLUNAddress]", func() { res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: map[string]string{ common.PublishContextDeviceWWN: validDeviceWWN, }, @@ -1326,7 +1326,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.It("should fail [iscsiTargets]", func() { res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: map[string]string{ common.PublishContextDeviceWWN: validDeviceWWN, common.PublishContextLUNAddress: validLUNID, @@ -1344,7 +1344,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { nodeSvc.useNVME = true nodeSvc.useFC = true res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: 
map[string]string{ common.PublishContextDeviceWWN: validDeviceWWN, common.PublishContextLUNAddress: validLUNID, @@ -1361,7 +1361,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.It("should fail [fcTargets]", func() { nodeSvc.useFC = true res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: map[string]string{ common.PublishContextDeviceWWN: validDeviceWWN, common.PublishContextLUNAddress: validLUNID, @@ -1386,7 +1386,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { scsiStageVolumeOK(utilMock, fsMock) res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1413,7 +1413,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, "/dev", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(e) res, err := nodeSvc.NodeStageVolume(context.Background(), &csi.NodeStageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: nodeStagePrivateDir, VolumeCapability: getCapabilityWithVoltypeAccessFstype( @@ -1453,7 +1453,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1482,7 +1482,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) _, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: "", }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("staging target path is required")) @@ -1510,7 +1510,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) _, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not reliably determine existing mount for path")) @@ -1538,7 +1538,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) _, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not unmount de")) @@ -1566,7 +1566,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1596,7 +1596,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) res, err := nodeSvc.NodeUnstageVolume(context.Background(), 
&csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1627,7 +1627,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("IsNotExist", mock.Anything).Return(false) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1664,7 +1664,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Remove", path.Join(nodeSvc.opts.TmpDir, validBaseVolumeID)).Return(nil) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1689,7 +1689,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Remove", stagingPath).Return(nil) res, err := nodeSvc.NodeUnstageVolume(context.Background(), &csi.NodeUnstageVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, StagingTargetPath: nodeStagePrivateDir, }) gomega.Expect(err).To(gomega.BeNil()) @@ -1713,7 +1713,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1736,7 +1736,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1758,7 +1758,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "ext4", "ro").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1780,7 +1780,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1802,7 +1802,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1824,7 +1824,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "ext4").Return(nil) _, err := 
nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1846,7 +1846,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Mount", mock.Anything, stagingPath, validTargetPath, "").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1866,7 +1866,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1887,7 +1887,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1907,7 +1907,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1927,7 +1927,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(errors.New("failed to bind")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1949,7 +1949,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1963,7 +1963,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No volume ID specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: "", + VolumeId: "", PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -1977,7 +1977,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, 
TargetPath: "", @@ -1991,7 +1991,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("Invalid volume capabilities specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2005,7 +2005,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No staging target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: "", TargetPath: validTargetPath, @@ -2028,7 +2028,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2050,7 +2050,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2073,7 +2073,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(errors.New("bind failed")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2103,7 +2103,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, validTargetPath).Return(nil) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err).To(gomega.BeNil()) @@ -2127,7 +2127,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, validTargetPath).Return(nil) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeID: validNfsVolumeID, + VolumeId: validNfsVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err).To(gomega.BeNil()) @@ -2137,7 +2137,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("No target path specified", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, TargetPath: "", }) gomega.Expect(err.Error()).To(gomega.Equal("rpc error: code = InvalidArgument desc = target path required")) @@ -2147,7 +2147,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ginkgo.When("Unable to get volID", func() { ginkgo.It("should fail", func() { res, err := nodeSvc.NodeUnpublishVolume(context.Background(), 
&csi.NodeUnpublishVolumeRequest{ - VolumeID: "", + VolumeId: "", TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.Equal("rpc error: code = InvalidArgument desc = volume ID is required")) @@ -2161,7 +2161,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return(nil, errors.New("error")) fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not reliably determine existing mount status")) @@ -2184,7 +2184,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) utilMock.On("Unmount", mock.Anything, validTargetPath).Return(errors.New("Unmount failed")) res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, TargetPath: validTargetPath, }) gomega.Expect(err.Error()).To(gomega.ContainSubstring("could not unmount dev")) @@ -2408,7 +2408,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { }, nil) _, err := nodeSvc.NodeExpandVolume(context.Background(), &csi.NodeExpandVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, VolumePath: "", CapacityRange: &csi.CapacityRange{ RequiredBytes: 2234234, @@ -2431,7 +2431,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { }, errors.New("err")).Times(1) _, err := nodeSvc.NodeExpandVolume(context.Background(), &csi.NodeExpandVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, VolumePath: validTargetPath, CapacityRange: &csi.CapacityRange{ RequiredBytes: 2234234, @@ -2615,7 +2615,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2648,7 +2648,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("ExecCommand", "mkfs.ext4", "-E", "nodiscard", "-F", mock.Anything).Return([]byte{}, nil) utilMock.On("Mount", mock.Anything, mock.Anything, mock.Anything, "ext4").Return(nil) res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2681,15 +2681,15 @@ var _ = ginkgo.Describe("CSINodeService", func() { }).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, nil) ctrlMock.On("ControllerPublishVolume", mock.Anything, &csi.ControllerPublishVolumeRequest{ - VolumeID: validBlockVolumeID, - NodeID: validNodeID, + VolumeId: validBlockVolumeID, + NodeId: validNodeID, VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2721,7 +2721,7 @@ var _ = 
ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, mock.Anything).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2744,7 +2744,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, @@ -2788,7 +2788,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { utilMock.On("Unmount", mock.Anything, mock.Anything).Return(nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2807,7 +2807,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, nil) fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil).Times(2) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2826,7 +2826,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { fsMock.On("Stat", mock.Anything).Return(&mocks.FileInfo{}, os.ErrNotExist) fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(errors.New("err")).Times(2) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2849,14 +2849,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, errors.New("Failed")) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: validTargetPath, @@ -2879,14 +2879,14 @@ var _ = ginkgo.Describe("CSINodeService", func() { ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{ Volume: &csi.Volume{ CapacityBytes: validVolSize, - VolumeID: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), + VolumeId: filepath.Join(validBaseVolumeID, firstValidIP, "scsi"), VolumeContext: map[string]string{ common.KeyArrayID: firstValidIP, }, }, }, nil) _, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{ - VolumeID: validBlockVolumeID, + VolumeId: validBlockVolumeID, PublishContext: getValidPublishContext(), StagingTargetPath: validStagingPath, TargetPath: 
@@ -2879,14 +2879,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{
 					Volume: &csi.Volume{
 						CapacityBytes: validVolSize,
-						VolumeID:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
+						VolumeId:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
 						VolumeContext: map[string]string{
 							common.KeyArrayID: firstValidIP,
 						},
 					},
 				}, nil)
 				_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: validStagingPath,
 					TargetPath:        validTargetPath,
@@ -2909,14 +2909,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{
 					Volume: &csi.Volume{
 						CapacityBytes: validVolSize,
-						VolumeID:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
+						VolumeId:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
 						VolumeContext: map[string]string{
 							common.KeyArrayID: firstValidIP,
 						},
 					},
 				}, nil)
 				_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: validStagingPath,
 					TargetPath:        validTargetPath,
@@ -2940,7 +2940,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				ctrlMock.On("CreateVolume", mock.Anything, mock.Anything).Return(&csi.CreateVolumeResponse{
 					Volume: &csi.Volume{
 						CapacityBytes: validVolSize,
-						VolumeID:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
+						VolumeId:      filepath.Join(validBaseVolumeID, firstValidIP, "scsi"),
 						VolumeContext: map[string]string{
 							common.KeyArrayID: firstValidIP,
 						},
@@ -2973,7 +2973,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				fsMock.On("ExecCommand", "mkfs.xfs", "-K", mock.Anything, "-m", mock.Anything).Return([]byte{}, nil)
 				utilMock.On("Mount", mock.Anything, mock.Anything, mock.Anything, "xfs", mock.Anything).Return(errors.New("err"))
 				_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
-					VolumeID:          validBlockVolumeID,
+					VolumeId:          validBlockVolumeID,
 					PublishContext:    getValidPublishContext(),
 					StagingTargetPath: validStagingPath,
 					TargetPath:        validTargetPath,
@@ -3015,14 +3015,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				fsMock.On("IsNotExist", mock.Anything).Return(false)
 				fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil)
 				ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{
-					VolumeID: validBlockVolumeID,
-					NodeID:   validNodeID,
+					VolumeId: validBlockVolumeID,
+					NodeId:   validNodeID,
 				}).Return(&csi.ControllerUnpublishVolumeResponse{}, nil)
 				ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{
-					VolumeID: validBlockVolumeID,
+					VolumeId: validBlockVolumeID,
 				}).Return(&csi.DeleteVolumeResponse{}, nil)
 				res, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{
-					VolumeID:   validBlockVolumeID,
+					VolumeId:   validBlockVolumeID,
 					TargetPath: validTargetPath,
 				})
 				gomega.Ω(err).To(gomega.BeNil())
@@ -3046,7 +3046,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				fsMock.On("GetUtil").Return(utilMock)
 				fsMock.On("ReadFile", ephemerallockfile).Return([]byte(validBlockVolumeID), os.ErrNotExist)
 				_, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{
-					VolumeID:   validBlockVolumeID,
+					VolumeId:   validBlockVolumeID,
 					TargetPath: validTargetPath,
 				})
 				gomega.Ω(err.Error()).To(gomega.ContainSubstring("Was unable to read lockfile"))
@@ -3078,14 +3078,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				fsMock.On("IsNotExist", mock.Anything).Return(false)
 				fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil)
 				ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{
-					VolumeID: validBlockVolumeID,
-					NodeID:   validNodeID,
+					VolumeId: validBlockVolumeID,
+					NodeId:   validNodeID,
 				}).Return(&csi.ControllerUnpublishVolumeResponse{}, errors.New("failed"))
 				ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{
-					VolumeID: validBlockVolumeID,
+					VolumeId: validBlockVolumeID,
 				}).Return(&csi.DeleteVolumeResponse{}, nil)
 				_, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{
-					VolumeID:   validBlockVolumeID,
+					VolumeId:   validBlockVolumeID,
 					TargetPath: validTargetPath,
 				})
 				gomega.Ω(err.Error()).To(gomega.ContainSubstring("Inline ephemeral controller unpublish"))
@@ -3117,14 +3117,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				fsMock.On("IsNotExist", mock.Anything).Return(false)
 				fsMock.On("ReadFile", mock.Anything).Return([]byte("Some data"), nil)
 				ctrlMock.On("ControllerUnpublishVolume", mock.Anything, &csi.ControllerUnpublishVolumeRequest{
-					VolumeID: validBlockVolumeID,
-					NodeID:   validNodeID,
+					VolumeId: validBlockVolumeID,
+					NodeId:   validNodeID,
 				}).Return(&csi.ControllerUnpublishVolumeResponse{}, nil)
 				ctrlMock.On("DeleteVolume", mock.Anything, &csi.DeleteVolumeRequest{
-					VolumeID: validBlockVolumeID,
+					VolumeId: validBlockVolumeID,
 				}).Return(&csi.DeleteVolumeResponse{}, errors.New("failed"))
 				_, err := nodeSvc.NodeUnpublishVolume(context.Background(), &csi.NodeUnpublishVolumeRequest{
-					VolumeID:   validBlockVolumeID,
+					VolumeId:   validBlockVolumeID,
 					TargetPath: validTargetPath,
 				})
 				gomega.Ω(err.Error()).To(gomega.ContainSubstring("failed"))
@@ -3156,7 +3156,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3195,7 +3195,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3234,7 +3234,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3271,7 +3271,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3307,7 +3307,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3335,7 +3335,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3369,7 +3369,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3420,7 +3420,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3473,7 +3473,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3526,7 +3526,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3555,7 +3555,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3586,7 +3586,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3626,7 +3626,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
@@ -3666,7 +3666,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{
-				NodeID: nodeSvc.nodeID,
+				NodeId: nodeSvc.nodeID,
 				AccessibleTopology: &csi.Topology{
 					Segments: map[string]string{
 						common.Name + "/" + firstValidIP + "-nfs": "true",
+ firstValidIP + "-nfs": "true", @@ -3704,7 +3704,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeID: nodeSvc.nodeID, + NodeId: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3738,7 +3738,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeID: nodeSvc.nodeID, + NodeId: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3773,7 +3773,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeID: nodeSvc.nodeID, + NodeId: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3803,7 +3803,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { res, err := nodeSvc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{}) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(res).To(gomega.Equal(&csi.NodeGetInfoResponse{ - NodeID: nodeSvc.nodeID, + NodeId: nodeSvc.nodeID, AccessibleTopology: &csi.Topology{ Segments: map[string]string{ common.Name + "/" + firstValidIP + "-nfs": "true", @@ -3992,7 +3992,7 @@ var _ = ginkgo.Describe("CSINodeService", func() { clientMock.On("GetFS", mock.Anything, validBaseVolumeID). 
@@ -3992,7 +3992,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			clientMock.On("GetFS", mock.Anything, validBaseVolumeID).
 				Return(gopowerstore.FileSystem{ID: validBaseVolumeID}, nil)

-			req := &csi.NodeGetVolumeStatsRequest{VolumeID: validBlockVolumeID, VolumePath: ""}
+			req := &csi.NodeGetVolumeStatsRequest{VolumeId: validBlockVolumeID, VolumePath: ""}

 			res, err := nodeSvc.NodeGetVolumeStats(context.Background(), req)

@@ -4055,7 +4055,7 @@ func getNodeVolumeExpandValidRequest(volid string, isBlock bool) *csi.NodeExpand
 	var size int64 = controller.MaxVolumeSizeBytes / 100
 	if !isBlock {
 		req := csi.NodeExpandVolumeRequest{
-			VolumeID:   volid,
+			VolumeId:   volid,
 			VolumePath: validTargetPath,
 			CapacityRange: &csi.CapacityRange{
 				RequiredBytes: size,
@@ -4065,7 +4065,7 @@ func getNodeVolumeExpandValidRequest(volid string, isBlock bool) *csi.NodeExpand
 		return &req
 	}
 	req := csi.NodeExpandVolumeRequest{
-		VolumeID:   volid,
+		VolumeId:   volid,
 		VolumePath: validTargetPath + "/csi/volumeDevices/publish/",
 		CapacityRange: &csi.CapacityRange{
 			RequiredBytes: size,
diff --git a/tests/e2e/go.mod b/tests/e2e/go.mod
index 3b96631c..a9f0d522 100644
--- a/tests/e2e/go.mod
+++ b/tests/e2e/go.mod
@@ -13,7 +13,7 @@ require (

 require (
 	github.com/dell/csi-powerstore/v2 v2.8.0
-	github.com/dell/gopowerstore v1.14.0
+	github.com/dell/gopowerstore v1.14.1-0.20240123112046-ec40aaf31242
 	github.com/onsi/ginkgo/v2 v2.13.0
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.29.0
diff --git a/tests/e2e/go.sum b/tests/e2e/go.sum
index 67ac8004..35843209 100644
--- a/tests/e2e/go.sum
+++ b/tests/e2e/go.sum
@@ -114,6 +114,7 @@ github.com/dell/gonvme v1.6.0 h1:Y/g0Ml8E3oSB+bqGJN1/U+V621h9t0KJeYAF5aQ7NVU=
 github.com/dell/gonvme v1.6.0/go.mod h1:/UgJAlR03LbPSDIK2BfhiYUlzyY7lAMJ6ao8eYab2Eg=
 github.com/dell/gopowerstore v1.14.0 h1:j4wSn25X2AH2t3/ySNPHmNWjqM0zHDkYyDPt+0uE8cc=
 github.com/dell/gopowerstore v1.14.0/go.mod h1:YH3SpMX2dr3ouYWWPhk5lzjip3aaVVksFOSoenRDY5w=
+github.com/dell/gopowerstore v1.14.1-0.20240123112046-ec40aaf31242/go.mod h1:YH3SpMX2dr3ouYWWPhk5lzjip3aaVVksFOSoenRDY5w=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=

From d2b9a280b37eac3619c3f0f26b59c053b4948d0f Mon Sep 17 00:00:00 2001
From: Utkarsh Dubey
Date: Thu, 25 Jan 2024 11:06:01 +0530
Subject: [PATCH 4/4] Adds unit test

---
 pkg/common/common_test.go         | 11 ++++
 pkg/controller/controller_test.go | 86 +++++++++++++++++++++--------
 pkg/controller/publisher_test.go  |  2 +
 pkg/node/node_test.go             | 91 ++++++++++++++++++++++++++-----
 4 files changed, 151 insertions(+), 39 deletions(-)

diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go
index 99b90b2c..080d5423 100644
--- a/pkg/common/common_test.go
+++ b/pkg/common/common_test.go
@@ -118,6 +118,17 @@ func TestGetISCSITargetsInfoFromStorage(t *testing.T) {
 	})
 }

+func TestGetNVMETCPTargetsInfoFromStorage(t *testing.T) {
+	t.Run("api error", func(t *testing.T) {
+		e := errors.New("some error")
+		clientMock := new(gopowerstoremock.Client)
+		clientMock.On("GetCluster", context.Background()).Return(gopowerstore.Cluster{}, e)
+		clientMock.On("GetStorageNVMETCPTargetAddresses", context.Background()).Return([]gopowerstore.IPPoolAddress{}, e)
+		_, err := common.GetNVMETCPTargetsInfoFromStorage(clientMock, "A1")
+		assert.EqualError(t, err, e.Error())
+	})
+}
+
 func TestGetFCTargetsInfoFromStorage(t *testing.T) {
 	t.Run("api error", func(t *testing.T) {
 		e := errors.New("some error")
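The new test covers only the api-error branch of GetNVMETCPTargetsInfoFromStorage. A happy-path case could sit alongside it in the same file; a sketch under the same mock package, with the NQN string invented purely for illustration:

	func TestGetNVMETCPTargetsInfoFromStorage_Success(t *testing.T) {
		clientMock := new(gopowerstoremock.Client)
		// The cluster NQN becomes the Target of every returned portal.
		clientMock.On("GetCluster", context.Background()).
			Return(gopowerstore.Cluster{NVMeNQN: "nqn.1988-11.com.dell:powerstore:00:example"}, nil)
		// One NVMe/TCP address on the appliance the volume lives on.
		clientMock.On("GetStorageNVMETCPTargetAddresses", context.Background()).
			Return([]gopowerstore.IPPoolAddress{
				{ID: "ip-1", Address: "192.168.1.1", ApplianceID: "A1"},
			}, nil)
		targets, err := common.GetNVMETCPTargetsInfoFromStorage(clientMock, "A1")
		assert.NoError(t, err)
		// Portals are composed as <address>:4420, the standard NVMe/TCP port.
		assert.Equal(t, "192.168.1.1:4420", targets[0].Portal)
	}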
diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go
index fd5cc383..aa6f3129 100644
--- a/pkg/controller/controller_test.go
+++ b/pkg/controller/controller_test.go
@@ -1880,7 +1880,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
-
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{
+					{
+						Address: "192.168.1.1",
+						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+					},
+				}, nil)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{
 					{
@@ -1902,13 +1908,15 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.ControllerPublishVolumeResponse{
 				PublishContext: map[string]string{
-					"PORTAL0":       "192.168.1.1:3260",
-					"TARGET0":       "iqn",
-					"NVMEFCPORTAL0": "nn-0x58ccf090c9200c22:pn-0x58ccf091492b0c22",
-					"NVMEFCTARGET0": "nqn",
-					"DEVICE_WWN":    "68ccf098003ceb5e4577a20be6d11bf9",
-					"LUN_ADDRESS":   "1",
-					"FCWWPN0":       "58ccf09348a003a3",
+					"PORTAL0":        "192.168.1.1:3260",
+					"TARGET0":        "iqn",
+					"NVMEFCPORTAL0":  "nn-0x58ccf090c9200c22:pn-0x58ccf091492b0c22",
+					"NVMEFCTARGET0":  "nqn",
+					"DEVICE_WWN":     "68ccf098003ceb5e4577a20be6d11bf9",
+					"LUN_ADDRESS":    "1",
+					"FCWWPN0":        "58ccf09348a003a3",
+					"NVMETCPTARGET0": "nqn",
+					"NVMETCPPORTAL0": "192.168.1.1:4420",
 				},
 			}))
 		})
@@ -2073,7 +2081,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
-
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{
+					{
+						Address: "192.168.1.1",
+						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+					},
+				}, nil)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{
 					{
@@ -2092,10 +2106,12 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.ControllerPublishVolumeResponse{
 				PublishContext: map[string]string{
-					"PORTAL0":     "192.168.1.1:3260",
-					"TARGET0":     "iqn",
-					"DEVICE_WWN":  "68ccf098003ceb5e4577a20be6d11bf9",
-					"LUN_ADDRESS": "1",
+					"PORTAL0":        "192.168.1.1:3260",
+					"TARGET0":        "iqn",
+					"DEVICE_WWN":     "68ccf098003ceb5e4577a20be6d11bf9",
+					"LUN_ADDRESS":    "1",
+					"NVMETCPPORTAL0": "192.168.1.1:4420",
+					"NVMETCPTARGET0": "",
 				},
 			}))
 		})
@@ -2183,7 +2199,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
-
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{
+					{
+						Address: "192.168.1.1",
+						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+					},
+				}, nil)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{
 					{
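For orientation, the NVMETCPPORTAL<i>/NVMETCPTARGET<i> pairs asserted above follow the same indexed-key convention as the existing PORTAL/TARGET and NVMEFC keys, so a node-side consumer can walk the indices until one is missing. A sketch of such a walk, assuming gobrick's NVMeTargetInfo shape and the fmt package; the helper name is hypothetical:

	// Hypothetical helper: collects NVMe/TCP portals from a publish context.
	func nvmeTCPTargetsFromContext(pc map[string]string) []gobrick.NVMeTargetInfo {
		var targets []gobrick.NVMeTargetInfo
		for i := 0; ; i++ {
			portal, ok := pc[fmt.Sprintf("NVMETCPPORTAL%d", i)]
			if !ok {
				break
			}
			targets = append(targets, gobrick.NVMeTargetInfo{
				Target: pc[fmt.Sprintf("NVMETCPTARGET%d", i)],
				Portal: portal,
			})
		}
		return targets
	}

The empty NVMETCPTARGET0 values expected in the surrounding cases appear to come from GetCluster stubs whose Cluster carries no NVMeNQN, leaving the composed target name as the zero string.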
@@ -2202,10 +2224,12 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.ControllerPublishVolumeResponse{
 				PublishContext: map[string]string{
-					"PORTAL0":     "192.168.1.1:3260",
-					"TARGET0":     "iqn",
-					"DEVICE_WWN":  "68ccf098003ceb5e4577a20be6d11bf9",
-					"LUN_ADDRESS": "1",
+					"PORTAL0":        "192.168.1.1:3260",
+					"TARGET0":        "iqn",
+					"DEVICE_WWN":     "68ccf098003ceb5e4577a20be6d11bf9",
+					"LUN_ADDRESS":    "1",
+					"NVMETCPPORTAL0": "192.168.1.1:4420",
+					"NVMETCPTARGET0": "",
 				},
 			}))
 		})
@@ -2229,7 +2253,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
-
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{
+					{
+						Address: "192.168.1.1",
+						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+					},
+				}, nil)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{
 					{
@@ -2276,7 +2306,13 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
-
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{
+					{
+						Address: "192.168.1.1",
+						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+					},
+				}, nil)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{
 					{
@@ -2295,10 +2331,12 @@ var _ = ginkgo.Describe("CSIControllerService", func() {
 			gomega.Expect(err).To(gomega.BeNil())
 			gomega.Expect(res).To(gomega.Equal(&csi.ControllerPublishVolumeResponse{
 				PublishContext: map[string]string{
-					"PORTAL0":     "192.168.1.1:3260",
-					"TARGET0":     "iqn",
-					"DEVICE_WWN":  "68ccf098003ceb5e4577a20be6d11bf9",
-					"LUN_ADDRESS": "2",
+					"PORTAL0":        "192.168.1.1:3260",
+					"TARGET0":        "iqn",
+					"DEVICE_WWN":     "68ccf098003ceb5e4577a20be6d11bf9",
+					"LUN_ADDRESS":    "2",
+					"NVMETCPPORTAL0": "192.168.1.1:4420",
+					"NVMETCPTARGET0": "",
 				},
 			}))
 		})
diff --git a/pkg/controller/publisher_test.go b/pkg/controller/publisher_test.go
index f3cbc414..b4937c77 100644
--- a/pkg/controller/publisher_test.go
+++ b/pkg/controller/publisher_test.go
@@ -129,6 +129,8 @@ func TestVolumePublisher_Publish(t *testing.T) {
 				Return([]gopowerstore.HostVolumeMapping{}, nil).Once()
 			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, e)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{}, e)
 			clientMock.On("GetFCPorts", mock.Anything).
 				Return([]gopowerstore.FcPort{}, nil)
 			clientMock.On("GetCluster", mock.Anything).
diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go
index 30e53629..07c6302c 100644
--- a/pkg/node/node_test.go
+++ b/pkg/node/node_test.go
@@ -28,6 +28,8 @@ import (
 	"path/filepath"
 	"testing"

+	"github.com/onsi/ginkgo/reporters"
+
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/dell/csi-powerstore/v2/mocks"
 	"github.com/dell/csi-powerstore/v2/pkg/array"
@@ -43,7 +45,6 @@ import (
 	"github.com/dell/gopowerstore/api"
 	gopowerstoremock "github.com/dell/gopowerstore/mocks"
 	ginkgo "github.com/onsi/ginkgo"
-	"github.com/onsi/ginkgo/reporters"
 	gomega "github.com/onsi/gomega"
 	"github.com/stretchr/testify/mock"
 )
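Each nodeProbe spec below now stubs GetStorageNVMETCPTargetAddresses and GetCluster on top of the existing iSCSI stub, since target discovery queries NVMe/TCP portals as well. The repetition could be collapsed with a small helper; a sketch reusing this file's mock type and test constants, with the helper name invented for illustration:

	// Hypothetical helper to register the NVMe/TCP discovery stubs once.
	func mockNVMeTCPDiscovery(clientMock *gopowerstoremock.Client) {
		clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
			Return([]gopowerstore.IPPoolAddress{}, nil)
		clientMock.On("GetCluster", mock.Anything).
			Return(gopowerstore.Cluster{
				Name:    validClusterName,
				NVMeNQN: validNVMEInitiators[0],
			}, nil)
	}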
@@ -675,8 +676,14 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			arrays := getTestArrays()
 			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			err := nodeSvc.nodeProbe(context.Background(), arrays["gid1"])
-
 			gomega.Expect(err.Error()).To(gomega.ContainSubstring("no active iscsi sessions"))
 		})
 	})
@@ -696,6 +703,13 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			nodeSvc.useNVME = true
 			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			err := nodeSvc.nodeProbe(context.Background(), arrays["gid1"])
 			nodeSvc.useNVME = false
 			gomega.Expect(err.Error()).To(gomega.ContainSubstring("no active nvme sessions"))
@@ -719,6 +733,13 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				}, nil)
 			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			nodeSvc.useNFS = true
 			arrays := getTestArrays()
 			err := nodeSvc.nodeProbe(context.Background(), arrays["gid1"])
@@ -744,6 +765,13 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				}, nil)
 			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
+				Return([]gopowerstore.IPPoolAddress{}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			nodeSvc.useNFS = true
 			nodeSvc.useNVME = true
 			arrays := getTestArrays()
@@ -885,8 +913,8 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 				}, nil)
 			arrays := getTestArrays()

-			nodeSvc.startNodeToArrayConnectivityCheck(context.Background())
 			nodeSvc.iscsiTargets["unique"] = []string{"iqn.2015-10.com.dell:dellemc-foobar-123-a-7ceb34a0"}
+			nodeSvc.startNodeToArrayConnectivityCheck(context.Background())

 			err := nodeSvc.nodeProbe(context.Background(), arrays["gid1"])
 			gomega.Expect(err).To(gomega.BeNil())
@@ -1251,7 +1279,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(4)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).
 				Return(mountInfo, nil).Twice()
-			fsMock.On("MkFileIDempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).
+			fsMock.On("MkFileIdempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).
 				Return(true, nil).Once()

 			fsMock.On("GetUtil").Return(utilMock)
@@ -1407,7 +1435,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			}).Return(gobrick.Device{}, nil)
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(true, nil)
+			fsMock.On("MkFileIdempotent", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(true, nil)
 			fsMock.On("GetUtil").Return(utilMock)

 			utilMock.On("BindMount", mock.Anything, "/dev", filepath.Join(nodeStagePrivateDir, validBaseVolumeID)).Return(e)
@@ -1862,7 +1890,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {

 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil)
+			fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil)
 			utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil)

 			res, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
@@ -1883,7 +1911,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {

 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil)
+			fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil)
 			utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath, "ro").Return(nil)

 			_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
@@ -1903,7 +1931,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {

 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", validTargetPath).Return(false, errors.New("failed"))
+			fsMock.On("MkFileIdempotent", validTargetPath).Return(false, errors.New("failed"))
 			utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(nil)

 			_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
@@ -1923,7 +1951,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {

 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", validTargetPath).Return(true, nil)
+			fsMock.On("MkFileIdempotent", validTargetPath).Return(true, nil)
 			utilMock.On("BindMount", mock.Anything, stagingPath, validTargetPath).Return(errors.New("failed to bind"))

 			_, err := nodeSvc.NodePublishVolume(context.Background(), &csi.NodePublishVolumeRequest{
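The MkFileIDempotent -> MkFileIdempotent renames in these stubs follow the method's spelling in the mocked filesystem interface: testify matches expectations by the method-name string passed to On, so a stub registered under the old spelling would never fire and the mock would fail the call at runtime. Corrected registration, as used throughout:

	// Matches only calls to the correctly spelled MkFileIdempotent method.
	fsMock.On("MkFileIdempotent", mock.Anything).Return(true, nil)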
@@ -2641,7 +2669,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil)
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", mock.Anything).Return(true, nil)
+			fsMock.On("MkFileIdempotent", mock.Anything).Return(true, nil)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
 			fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil)
 			utilMock.On("GetDiskFormat", mock.Anything, mock.Anything).Return("", nil)
@@ -2770,7 +2798,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil)
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil).Times(2)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", mock.Anything).Return(true, errors.New("error"))
+			fsMock.On("MkFileIdempotent", mock.Anything).Return(true, errors.New("error"))
 			fsMock.On("GetUtil").Return(utilMock)

 			mountInfo := []gofsutil.Info{
@@ -2966,7 +2994,7 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			utilMock.On("BindMount", mock.Anything, "/dev", mock.Anything).Return(nil)
 			fsMock.On("ReadFile", "/proc/self/mountinfo").Return([]byte{}, nil)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
-			fsMock.On("MkFileIDempotent", mock.Anything).Return(true, nil)
+			fsMock.On("MkFileIdempotent", mock.Anything).Return(true, nil)
 			fsMock.On("ParseProcMounts", context.Background(), mock.Anything).Return([]gofsutil.Info{}, nil)
 			fsMock.On("MkdirAll", mock.Anything, mock.Anything).Return(nil)
 			utilMock.On("GetDiskFormat", mock.Anything, mock.Anything).Return("", nil)
@@ -3721,13 +3749,24 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 		ginkgo.It("should return NVMeTCP topology segments", func() {
 			nodeSvc.useNVME = true
 			nodeSvc.useFC = false
-			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
+			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).Return([]gopowerstore.IPPoolAddress{
+				{
+					Address: "192.168.1.1",
+					IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+				},
+			}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{
 					{
 						Address: "192.168.1.1",
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			conn, _ := net.Dial("udp", "127.0.0.1:80")
 			fsMock.On("NetDial", mock.Anything).Return(
 				conn,
@@ -3756,13 +3795,24 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			gonvme.GONVMEMock.InduceDiscoveryError = true
 			nodeSvc.useNVME = true
 			nodeSvc.useFC = false
-			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
+			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).Return([]gopowerstore.IPPoolAddress{
+				{
+					Address: "192.168.1.1",
+					IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+				},
+			}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{
 					{
 						Address: "192.168.1.1",
 						IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
 					},
 				}, nil)
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			conn, _ := net.Dial("udp", "127.0.0.1:80")
 			fsMock.On("NetDial", mock.Anything).Return(
 				conn,
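gonvme's mock driver exposes package-level fault toggles such as InduceDiscoveryError; because they are global, resetting them between specs keeps cases independent. A sketch of a cleanup hook, assuming the ginkgo v1 API already imported in this file:

	ginkgo.AfterEach(func() {
		// Clear the global fault injected by the discovery-error spec.
		gonvme.GONVMEMock.InduceDiscoveryError = false
	})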
@@ -3791,8 +3841,19 @@ var _ = ginkgo.Describe("CSINodeService", func() {
 			nodeSvc.useNVME = true
 			nodeSvc.useFC = false
 			e := "internalerror"
-			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).
+			clientMock.On("GetStorageISCSITargetAddresses", mock.Anything).Return([]gopowerstore.IPPoolAddress{
+				{
+					Address: "192.168.1.1",
+					IPPort:  gopowerstore.IPPortInstance{TargetIqn: "iqn"},
+				},
+			}, nil)
+			clientMock.On("GetStorageNVMETCPTargetAddresses", mock.Anything).
 				Return([]gopowerstore.IPPoolAddress{}, errors.New(e))
+			clientMock.On("GetCluster", mock.Anything).
+				Return(gopowerstore.Cluster{
+					Name:    validClusterName,
+					NVMeNQN: validNVMEInitiators[0],
+				}, nil)
 			conn, _ := net.Dial("udp", "127.0.0.1:80")
 			fsMock.On("NetDial", mock.Anything).Return(
 				conn,