Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[backport-1.31] [cinder-csi-plugin] Add --with-topology opt #2746

Merged
merged 1 commit into from
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion cmd/cinder-csi-plugin/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ var (
provideControllerService bool
provideNodeService bool
noClient bool
withTopology bool
)

func main() {
Expand Down Expand Up @@ -68,6 +69,8 @@ func main() {
klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
}

cmd.PersistentFlags().BoolVar(&withTopology, "with-topology", true, "cluster is topology-aware")

cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")

Expand All @@ -86,7 +89,11 @@ func main() {

func handle() {
// Initialize cloud
d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
d := cinder.NewDriver(&cinder.DriverOpts{
Endpoint: endpoint,
ClusterID: cluster,
WithTopology: withTopology,
})

openstack.InitOpenStackProvider(cloudConfig, httpEndpoint)

Expand Down
17 changes: 12 additions & 5 deletions docs/cinder-csi-plugin/using-cinder-csi-plugin.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,13 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
The manifests default this to `unix://csi/csi.sock`, which is supplied via the `CSI_ENDPOINT` environment variable.
</dd>

<dt>--with-topology &lt;enabled&gt;</dt>
<dd>
If set to true then the CSI driver reports topology information and the controller respects it.

Defaults to `true` (enabled).
</dd>

<dt>--cloud-config &lt;config file&gt; [--cloud-config &lt;config file&gt; ...]</dt>
<dd>
This argument must be given at least once.
Expand Down Expand Up @@ -100,23 +107,23 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f

<dt>--provide-controller-service &lt;enabled&gt;</dt>
<dd>
If set to true then the CSI driver does provide the controller service.
If set to true then the CSI driver provides the controller service.

The default is to provide the controller service.
Defaults to `true` (enabled).
</dd>

<dt>--provide-node-service &lt;enabled&gt;</dt>
<dd>
If set to true then the CSI driver does provide the node service.
If set to true then the CSI driver provides the node service.

The default is to provide the node service.
Defaults to `true` (enabled).
</dd>

<dt>--node-service-no-os-client &lt;disabled&gt;</dt>
<dd>
If set to true then the CSI driver does not provide the OpenStack client in the node service.

The default is to provide the OpenStack client in the node service.
Defaults to `false` (disabled).
</dd>
</dl>

Expand Down
19 changes: 12 additions & 7 deletions pkg/csi/cinder/controllerserver.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// Volume Name
volName := req.GetName()
volCapabilities := req.GetVolumeCapabilities()
volParams := req.GetParameters()

if len(volName) == 0 {
return nil, status.Error(codes.InvalidArgument, "[CreateVolume] missing Volume Name")
Expand All @@ -80,13 +81,17 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
// Volume Type
volType := req.GetParameters()["type"]

// First check if volAvailability is already specified, if not get preferred from Topology
// Required, incase vol AZ is different from node AZ
volAvailability := req.GetParameters()["availability"]
if volAvailability == "" {
// Check from Topology
if req.GetAccessibilityRequirements() != nil {
volAvailability = util.GetAZFromTopology(topologyKey, req.GetAccessibilityRequirements())
var volAvailability string
if cs.Driver.withTopology {
// First check if volAvailability is already specified, if not get preferred from Topology
		// Required, in case vol AZ is different from node AZ
volAvailability = volParams["availability"]
if volAvailability == "" {
accessibleTopologyReq := req.GetAccessibilityRequirements()
// Check from Topology
if accessibleTopologyReq != nil {
volAvailability = util.GetAZFromTopology(topologyKey, accessibleTopologyReq)
}
}
}

Expand Down
2 changes: 1 addition & 1 deletion pkg/csi/cinder/controllerserver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ func init() {
osmock = new(openstack.OpenStackMock)
osmockRegionX = new(openstack.OpenStackMock)

d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})

fakeCs = NewControllerServer(d, map[string]openstack.IOpenStack{
"": osmock,
Expand Down
16 changes: 10 additions & 6 deletions pkg/csi/cinder/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,11 @@ type CinderDriver = Driver
//revive:enable:exported

type Driver struct {
name string
fqVersion string //Fully qualified version in format {Version}@{CPO version}
endpoint string
cluster string
name string
fqVersion string //Fully qualified version in format {Version}@{CPO version}
endpoint string
cluster string
withTopology bool

ids *identityServer
cs *controllerServer
Expand All @@ -71,8 +72,9 @@ type Driver struct {
}

type DriverOpts struct {
ClusterID string
Endpoint string
ClusterID string
Endpoint string
WithTopology bool
}

func NewDriver(o *DriverOpts) *Driver {
Expand All @@ -81,10 +83,12 @@ func NewDriver(o *DriverOpts) *Driver {
d.fqVersion = fmt.Sprintf("%s@%s", Version, version.Version)
d.endpoint = o.Endpoint
d.cluster = o.ClusterID
d.withTopology = o.WithTopology

klog.Info("Driver: ", d.name)
klog.Info("Driver version: ", d.fqVersion)
klog.Info("CSI Spec version: ", specVersion)
	klog.Infof("Topology awareness: %t", d.withTopology)

d.AddControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
Expand Down
21 changes: 12 additions & 9 deletions pkg/csi/cinder/nodeserver.go
Original file line number Diff line number Diff line change
Expand Up @@ -359,12 +359,21 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
}

func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {

nodeID, err := ns.Metadata.GetInstanceID()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] unable to retrieve instance id of node %v", err)
}

maxVolume := ns.Cloud.GetMaxVolLimit()
nodeInfo := &csi.NodeGetInfoResponse{
NodeId: nodeID,
MaxVolumesPerNode: maxVolume,
}

if !ns.Driver.withTopology {
return nodeInfo, nil
}

zone, err := ns.Metadata.GetAvailabilityZone()
if err != nil {
return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err)
Expand All @@ -374,15 +383,9 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
for k, v := range ns.Topologies {
topologyMap[k] = v
}
topology := &csi.Topology{Segments: topologyMap}

maxVolume := ns.Cloud.GetMaxVolLimit()
nodeInfo.AccessibleTopology = &csi.Topology{Segments: topologyMap}

return &csi.NodeGetInfoResponse{
NodeId: nodeID,
AccessibleTopology: topology,
MaxVolumesPerNode: maxVolume,
}, nil
return nodeInfo, nil
}

func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
Expand Down
2 changes: 1 addition & 1 deletion pkg/csi/cinder/nodeserver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ var omock *openstack.OpenStackMock
func init() {
if fakeNs == nil {

d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster, WithTopology: true})

// mock MountMock
mmock = new(mount.MountMock)
Expand Down
2 changes: 1 addition & 1 deletion tests/sanity/cinder/sanity_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ func TestDriver(t *testing.T) {
endpoint := "unix://" + socket
cluster := "kubernetes"

d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster})
d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster, WithTopology: true})

fakecloudprovider := getfakecloud()
openstack.OsInstances = map[string]openstack.IOpenStack{
Expand Down