Avoiding redundant mounts by using /proc/mount info #1224
Changes from all commits: be205c1, e88907c, 62782d9, 51468d6, f7e3360
First file — the refcount recovery test script:

```diff
@@ -67,10 +67,11 @@ function check_files {
 }

 function check_recovery_record {
-    line=`tail -10 /var/log/docker-volume-vsphere.log | $GREP 'Volume name=' | $GREP 'mounted=true'`
-    expected="name=$vname count=$count mounted=true"
+    # log contains refcounting attempts and after success logs summary.
+    line=`tail -50 /var/log/docker-volume-vsphere.log | $GREP 'Volume name=' | $GREP 'mounted=true'`
+    expected="count=$count mounted=true"

-    echo $line | $GREP -q "$expected" ; if [ $? -ne 0 ] ; then
+    echo $line | $GREP "$vname" | $GREP -q "$expected" ; if [ $? -ne 0 ] ; then
        echo Found: \"$line\"
        echo Expected pattern: \"$expected\"
        return 1
```
```diff
@@ -80,20 +81,21 @@ function check_recovery_record {

 function test_crash_recovery {
    timeout=$1
-   echo "Checking recovery for VMDK plugin kill -9"
-   kill -9 `pidof docker-volume-vsphere`
-   until pids=$(pidof docker-volume-vsphere)
+   echo "Checking recovery through docker kill"
+   # kill docker daemon forcefully
+   pkill -9 dockerd
+   until pids=$(pidof dockerd)
    do
-      echo "Waiting for docker-volume-vsphere to restart"
+      echo "Waiting for docker to restart"
       sleep 1
    done

    echo "Waiting for plugin init"
-   sleep 3
+   sleep 5
    sync # give log the time to flush
    wait_for check_recovery_record $timeout
    if [ "$?" -ne 0 ] ; then
-      echo PLUGIN RESTART TEST FAILED. Did not find proper recovery record
+      echo DOCKER RESTART TEST FAILED. Did not find proper recovery record
       exit 1
    fi
 }
```

Review comment: Not sure this change is needed. The test is to check that, when the plugin restarts, the refcounts are restored. If a plugin restart isn't possible, then this test should be made manual (executed with a standalone binary) and deprecated from the automated test run.

Reply: This test will in turn test plugin restart (docker restart). Plugin restart through a docker restart exercises the Docker API consumption, timeouts, and the handling of parallel mounts/unmounts until refcounting succeeds. Why not keep this as an automated test?
```diff
@@ -115,8 +117,9 @@ fi
 echo "$(docker volume ls)"
 for i in `seq 1 $count`
 do
-   $DOCKER run -d -v $vname:/v busybox sh -c "touch /v/file$i; sync ; \
-       sleep $timeout"
+   # run containers with restart flag so they restart after docker restart
+   $DOCKER run -d --restart=always -v $vname:/v busybox sh -c "touch /v/file$i; sync ; \
+       while true; do sleep $timeout; done"
 done

 echo "Checking the last refcount and mount record"
```
Second file — the Photon volume driver:

```diff
@@ -38,6 +38,7 @@ import (
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/go-plugins-helpers/volume"
 	"github.com/vmware/docker-volume-vsphere/vmdk_plugin/utils/fs"
+	"github.com/vmware/docker-volume-vsphere/vmdk_plugin/utils/plugin_utils"
 	"github.com/vmware/docker-volume-vsphere/vmdk_plugin/utils/refcount"
 	"github.com/vmware/photon-controller-go-sdk/photon"
 )
```
```diff
@@ -56,12 +57,13 @@

 // VolumeDriver - Photon volume driver struct
 type VolumeDriver struct {
-	client    *photon.Client
-	hostID    string
-	mountRoot string
-	project   string
-	refCounts *refcount.RefCountsMap
-	target    string
+	client        *photon.Client
+	hostID        string
+	mountRoot     string
+	project       string
+	refCounts     *refcount.RefCountsMap
+	target        string
+	mountIDtoName map[string]string // map of mountID -> full volume name
 }

 func (d *VolumeDriver) verifyTarget() error {
```
```diff
@@ -95,6 +97,7 @@ func NewVolumeDriver(targetURL string, projectID string, hostID string, mountDir
 	d.mountRoot = mountDir
 	d.refCounts = refcount.NewRefCountsMap()
 	d.refCounts.Init(d, mountDir, driverName)
+	d.mountIDtoName = make(map[string]string)

 	log.WithFields(log.Fields{
 		"version": version,
```
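The new mountIDtoName map exists because Docker's volume plugin API identifies each mount/unmount pair by an opaque ID (the r.ID seen later in the diff), while this PR normalizes the requested name to the full volume@datastore form at mount time; Unmount then needs a way back to the normalized name. Below is a minimal standalone sketch of that lifecycle — the names and helpers here are illustrative, not the PR's code:

```go
// Sketch of the mountIDtoName lifecycle: record the normalized name under
// the mount ID on mount, recover (and delete) it on unmount.
package main

import "fmt"

type driver struct {
	mountIDtoName map[string]string // mountID -> full volume name
}

func (d *driver) onMount(id, fullName string) {
	d.mountIDtoName[id] = fullName
}

// onUnmount returns the normalized name for the given mount ID, falling
// back to the request name when the ID is unknown (e.g. after a restart).
func (d *driver) onUnmount(id, requestName string) string {
	if fullName, ok := d.mountIDtoName[id]; ok {
		delete(d.mountIDtoName, id) // cleanup, mirroring the Unmount change below
		return fullName
	}
	return requestName
}

func main() {
	d := &driver{mountIDtoName: make(map[string]string)}
	d.onMount("mount-1", "vol1@datastore1")
	fmt.Println(d.onUnmount("mount-1", "vol1")) // prints vol1@datastore1
}
```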
```diff
@@ -360,8 +363,22 @@ func (d *VolumeDriver) MountVolume(name string, fstype string, id string, isRead
 	return mountpoint, fs.MountWithID(mountpoint, fstype, id, isReadOnly)
 }

+// VolumesInRefMap - get list of volumes names from refmap
+// names are in format volume@datastore
+func (d *VolumeDriver) VolumesInRefMap() []string {
+	return d.refCounts.GetVolumeNames()
+}
+
 // private function that does the job of mounting volume in conjunction with refcounting
 func (d *VolumeDriver) processMount(r volume.MountRequest) volume.Response {
+	volumeInfo, err := plugin_utils.GetVolumeInfo(r.Name, "", d)
+	if err != nil {
+		log.Errorf("Unable to get volume info for volume %s. err:%v", r.Name, err)
+		return volume.Response{Err: err.Error()}
+	}
+	r.Name = volumeInfo.VolumeName
+	d.mountIDtoName[r.ID] = r.Name
+
 	// If the volume is already mounted , just increase the refcount.
 	// Note: for new keys, GO maps return zero value, so no need for if_exists.
 	refcnt := d.incrRefCount(r.Name) // save map traversal
```
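plugin_utils.GetVolumeInfo is only ever consumed in this diff, never defined. Judging from that usage alone (volumeInfo.VolumeName, volumeInfo.VolumeMeta, and the later volumeMeta[fsTypeTag] lookups with type assertions), its return value looks roughly like the sketch below; the real types live in vmdk_plugin/utils/plugin_utils and may differ:

```go
// Hypothetical shape of the plugin_utils result, inferred from the diff.
package plugin_utils_sketch

// VolumeInfo carries the normalized volume name plus any metadata already
// fetched during name resolution, letting processMount skip a second
// GetVolume round trip when VolumeMeta is non-nil.
type VolumeInfo struct {
	VolumeName string                 // full "volume@datastore" name
	VolumeMeta map[string]interface{} // e.g. fstype tag, "State", "ID"; nil if not fetched
}
```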
```diff
@@ -373,22 +390,28 @@ func (d *VolumeDriver) processMount(r volume.MountRequest) volume.Response {
 		return volume.Response{Mountpoint: d.getMountPoint(r.Name)}
 	}

-	status, err := d.GetVolume(r.Name)
-	if err != nil {
-		d.decrRefCount(r.Name)
-		return volume.Response{Err: err.Error()}
+	// There can be redundant mounts till refcounts are properly initialized
+	// TODO: #1220
+	if plugin_utils.AlreadyMounted(r.Name, d.mountRoot) {
+		log.WithFields(log.Fields{"name": r.Name}).Info("Already mounted, skipping mount. ")
+		return volume.Response{Mountpoint: d.getMountPoint(r.Name)}
 	}

+	// get volume metadata if required
+	volumeMeta := volumeInfo.VolumeMeta
+	if volumeMeta == nil {
+		if volumeMeta, err = d.GetVolume(r.Name); err != nil {
+			d.decrRefCount(r.Name)
+			return volume.Response{Err: err.Error()}
+		}
+	}

-	fstype, exists := status[fsTypeTag]
+	fstype, exists := volumeMeta[fsTypeTag]
 	if !exists {
 		fstype = fs.FstypeDefault
 	}

 	skipAttach := false
 	// If the volume is already attached to the VM, skip the attach.
-	if state, stateExists := status["State"]; stateExists {
+	if state, stateExists := volumeMeta["State"]; stateExists {
 		if strings.Compare(state.(string), "DETACHED") != 0 {
 			skipAttach = true
 		}
```

Review comment (on the "DETACHED" comparison): This is not your change, but please file an issue and let's revisit.
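The heart of the change is plugin_utils.AlreadyMounted, which consults the host's mount table instead of trusting the in-memory refcount alone. A minimal sketch of that kind of /proc/mounts check, assuming each volume is mounted at mountRoot/&lt;name&gt; (the real helper lives in vmdk_plugin/utils/plugin_utils and may handle more cases):

```go
// Sketch of a /proc/mounts-based "already mounted" check, in the spirit of
// plugin_utils.AlreadyMounted; not the PR's actual implementation.
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// alreadyMounted reports whether mountRoot/<volName> appears as a mountpoint
// in /proc/mounts. Each line there has the form:
//   device mountpoint fstype options dump pass
func alreadyMounted(volName, mountRoot string) (bool, error) {
	target := filepath.Join(mountRoot, volName)
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return false, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 2 && fields[1] == target {
			return true, nil
		}
	}
	return false, scanner.Err()
}

func main() {
	mounted, err := alreadyMounted("vol1@datastore1", "/mnt/vmdk")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("mounted:", mounted)
}
```

Keying the decision off the kernel's view of mounts is what makes the refcounts self-correcting across plugin restarts: even if the in-memory map is stale, a redundant mount is detected and skipped.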
```diff
@@ -398,7 +421,7 @@ func (d *VolumeDriver) processMount(r volume.MountRequest) volume.Response {
 	// Mount the volume and for now its always read-write.
-	mountpoint, err := d.MountVolume(r.Name, fstype.(string), status["ID"].(string), false, skipAttach)
+	mountpoint, err := d.MountVolume(r.Name, fstype.(string), volumeMeta["ID"].(string), false, skipAttach)
 	if err != nil {
 		log.WithFields(
 			log.Fields{"name": r.Name, "error": err.Error()},
```
```diff
@@ -618,6 +641,18 @@ func (d *VolumeDriver) Unmount(r volume.UnmountRequest) volume.Response {
 		return volume.Response{Err: ""}
 	}

+	if fullVolName, exist := d.mountIDtoName[r.ID]; exist {
+		r.Name = fullVolName
+		delete(d.mountIDtoName, r.ID) //cleanup the map
+	} else {
+		volumeInfo, err := plugin_utils.GetVolumeInfo(r.Name, "", d)
+		if err != nil {
+			log.Errorf("Unable to get volume info for volume %s. err:%v", r.Name, err)
+			return volume.Response{Err: err.Error()}
+		}
+		r.Name = volumeInfo.VolumeName
+	}
+
 	// if refcount has been succcessful, Normal flow.
 	// if the volume is still used by other containers, just return OK
 	refcnt, err := d.decrRefCount(r.Name)
```
Review comment: How does this impact the refcount module? Unless the plugin is restarted, the refcount module hasn't lost its refcounts, so there is no recovery to do.