feat: add mountOptions parameter for inline volume
andyzhangx committed Jan 11, 2022
1 parent 6028557 commit 010fbb4
Showing 9 changed files with 34 additions and 26 deletions.
charts/README.md: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driver
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `driver.mountPermissions` | mounted folder permissions | `0777` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
-| `feature.enableInlineVolume` | enable inline volume | `true` |
+| `feature.enableInlineVolume` | enable inline volume | `false` |
| `image.nfs.repository` | csi-driver-nfs image | `mcr.microsoft.com/k8s/csi/nfs-csi` |
| `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
charts/latest/csi-driver-nfs-v3.1.0.tgz: binary file modified
charts/latest/csi-driver-nfs/values.yaml: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ driver:

feature:
  enableFSGroupPolicy: false
-  enableInlineVolume: true
+  enableInlineVolume: false

controller:
  name: csi-nfs-controller
deploy/example/nginx-pod-inline-volume.yaml: 3 additions & 2 deletions
@@ -21,5 +21,6 @@ spec:
      csi:
        driver: nfs.csi.k8s.io
        volumeAttributes:
-          server: nfs-server.default.svc.cluster.local # required
-          share: / # required
+          server: nfs-server.default.svc.cluster.local # required
+          share: / # required
+          mountOptions: "nfsvers=4.1" # optional
pkg/nfs/nodeserver.go: 10 additions & 6 deletions
@@ -38,7 +38,8 @@ type NodeServer struct {

// NodePublishVolume mount the volume
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
-    if req.GetVolumeCapability() == nil {
+    volCap := req.GetVolumeCapability()
+    if volCap == nil {
        return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
    }
    volumeID := req.GetVolumeId()
@@ -49,6 +50,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
    if len(targetPath) == 0 {
        return nil, status.Error(codes.InvalidArgument, "Target path not provided")
    }
+    mountOptions := volCap.GetMount().GetMountFlags()
+    if req.GetReadonly() {
+        mountOptions = append(mountOptions, "ro")
+    }

    var server, baseDir string
    for k, v := range req.GetVolumeContext() {
@@ -57,6 +62,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
            server = v
        case paramShare:
            baseDir = v
+        case mountOptionsField:
+            if v != "" {
+                mountOptions = append(mountOptions, v)
+            }
        }
    }

@@ -83,11 +92,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
        return &csi.NodePublishVolumeResponse{}, nil
    }

-    mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
-    if req.GetReadonly() {
-        mountOptions = append(mountOptions, "ro")
-    }
-
    klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
    err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
    if err != nil {
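The hunk above is the core of the feature: NodePublishVolume now assembles the NFS mount options from three sources, in order: the mount flags on the volume capability, an "ro" flag when the request is read-only, and the value of the "mountOptions" volume attribute. The merged slice is what eventually reaches ns.mounter.Mount(source, targetPath, "nfs", mountOptions). Below is a minimal standalone sketch of that merging, written against the upstream CSI proto package; the helper name and the case-insensitive key match are illustrative only, since the driver inlines this logic and matches the key via its mountOptionsField constant.

package main

import (
    "fmt"
    "strings"

    "github.com/container-storage-interface/spec/lib/go/csi"
)

// buildMountOptions mirrors the ordering introduced by this commit:
// capability mount flags first, then "ro" for read-only requests,
// then the "mountOptions" volume attribute.
func buildMountOptions(req *csi.NodePublishVolumeRequest) []string {
    options := req.GetVolumeCapability().GetMount().GetMountFlags()
    if req.GetReadonly() {
        options = append(options, "ro")
    }
    for k, v := range req.GetVolumeContext() {
        if strings.EqualFold(k, "mountOptions") && v != "" {
            options = append(options, v)
        }
    }
    return options
}

func main() {
    req := &csi.NodePublishVolumeRequest{
        Readonly: true,
        VolumeContext: map[string]string{
            "server":       "nfs-server.default.svc.cluster.local",
            "share":        "/",
            "mountOptions": "nfsvers=4.1",
        },
    }
    fmt.Println(buildMountOptions(req)) // prints: [ro nfsvers=4.1]
}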
test/e2e/dynamic_provisioning_test.go: 6 additions & 5 deletions
@@ -275,11 +275,12 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
        }

        test := testsuites.DynamicallyProvisionedInlineVolumeTest{
-            CSIDriver: testDriver,
-            Pods:      pods,
-            Server:    nfsServerAddress,
-            Share:     nfsShare,
-            ReadOnly:  false,
+            CSIDriver:    testDriver,
+            Pods:         pods,
+            Server:       nfsServerAddress,
+            Share:        nfsShare,
+            MountOptions: "nfsvers=4.1",
+            ReadOnly:     false,
        }
        test.Run(cs, ns)
    })
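The e2e case now passes MountOptions: "nfsvers=4.1" through the inline-volume test, but the suite only asserts that the pod's Cmd exits 0. A hypothetical follow-up check, not part of this commit, could make the command fail unless the option is actually visible on the mount. The sketch below assumes the suite's existing PodDetails shape and a volumes variable equivalent to the one used by the current inline-volume case; the /proc/mounts grep is an added assumption.

// Hypothetical variant: the pod command succeeds only if the requested
// NFS option shows up in /proc/mounts, proving mountOptions took effect.
pods := []testsuites.PodDetails{
    {
        // nfsvers=4.1 is reported as vers=4.1 in /proc/mounts.
        Cmd:     "grep 'vers=4.1' /proc/mounts && echo 'mount options applied' > /mnt/test-1/data",
        Volumes: volumes, // same volume definitions as the existing inline-volume case
    },
}
test := testsuites.DynamicallyProvisionedInlineVolumeTest{
    CSIDriver:    testDriver,
    Pods:         pods,
    Server:       nfsServerAddress,
    Share:        nfsShare,
    MountOptions: "nfsvers=4.1",
    ReadOnly:     false,
}
test.Run(cs, ns)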
test/e2e/testsuites/dynamically_provisioned_inline_volume.go: 7 additions & 6 deletions
@@ -27,18 +27,19 @@ import (
// Waiting for the PV provisioner to create an inline volume
// Testing if the Pod(s) Cmd is run with a 0 exit code
type DynamicallyProvisionedInlineVolumeTest struct {
-    CSIDriver driver.DynamicPVTestDriver
-    Pods      []PodDetails
-    Server    string
-    Share     string
-    ReadOnly  bool
+    CSIDriver    driver.DynamicPVTestDriver
+    Pods         []PodDetails
+    Server       string
+    Share        string
+    MountOptions string
+    ReadOnly     bool
}

func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
    for _, pod := range t.Pods {
        var tpod *TestPod
        var cleanup []func()
-        tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.ReadOnly)
+        tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.MountOptions, t.ReadOnly)
        // defer must be called here so resources are not removed before they are used
        for i := range cleanup {
            defer cleanup[i]()
test/e2e/testsuites/specs.go: 2 additions & 2 deletions
@@ -123,11 +123,11 @@ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, names
    return tpod, cleanupFuncs
}

-func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share string, readOnly bool) (*TestPod, []func()) {
+func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share, mountOptions string, readOnly bool) (*TestPod, []func()) {
    tpod := NewTestPod(client, namespace, pod.Cmd)
    cleanupFuncs := make([]func(), 0)
    for n, v := range pod.Volumes {
-        tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, readOnly)
+        tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, mountOptions, readOnly)
    }
    return tpod, cleanupFuncs
}
test/e2e/testsuites/testsuites.go: 4 additions & 3 deletions
@@ -596,7 +596,7 @@ func (t *TestPod) SetupVolumeMountWithSubpath(pvc *v1.PersistentVolumeClaim, nam
    t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

-func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, readOnly bool) {
+func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share, mountOptions string, readOnly bool) {
    volumeMount := v1.VolumeMount{
        Name:      name,
        MountPath: mountPath,
@@ -610,8 +610,9 @@ func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, readOnly bool) {
        CSI: &v1.CSIVolumeSource{
            Driver: nfs.DefaultDriverName,
            VolumeAttributes: map[string]string{
-                "server": server,
-                "share":  share,
+                "server":       server,
+                "share":        share,
+                "mountOptions": mountOptions,
            },
            ReadOnly: &readOnly,
        },
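For reference outside the e2e harness, the object SetupCSIInlineVolume now wires together looks roughly like the sketch below: an inline CSI volume whose volumeAttributes carry mountOptions next to server and share, mounted into a container. The sketch uses only the upstream k8s.io/api types; the pod, container, and mount names are made up for illustration and are not part of this commit.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildInlineNFSPod returns a pod with a CSI inline NFS volume, including the
// mountOptions attribute added by this commit.
func buildInlineNFSPod(server, share, mountOptions string, readOnly bool) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "nginx-inline-nfs"},
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:  "nginx",
                Image: "nginx",
                VolumeMounts: []v1.VolumeMount{{
                    Name:      "persistent-storage",
                    MountPath: "/mnt/nfs",
                }},
            }},
            Volumes: []v1.Volume{{
                Name: "persistent-storage",
                VolumeSource: v1.VolumeSource{
                    CSI: &v1.CSIVolumeSource{
                        Driver:   "nfs.csi.k8s.io",
                        ReadOnly: &readOnly,
                        VolumeAttributes: map[string]string{
                            "server":       server,
                            "share":        share,
                            "mountOptions": mountOptions, // e.g. "nfsvers=4.1"
                        },
                    },
                },
            }},
        },
    }
}

func main() {
    pod := buildInlineNFSPod("nfs-server.default.svc.cluster.local", "/", "nfsvers=4.1", false)
    fmt.Println(pod.Spec.Volumes[0].CSI.VolumeAttributes)
}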
