Rewrite SCSI support in new package
The existing SCSI implementation in internal/uvm has evolved organically
over time into what it is today. This creates unnecessary difficulty when
adding new features to the code, makes the code harder to maintain, and
has been a source of bugs.

Additionally, there is a significant functional issue: the current
scsi code tightly couples attaching a SCSI device to a VM with
using/mounting that device inside the VM. This makes it difficult to
re-use the same SCSI attachment multiple times, especially in the
future, when we will need to mount multiple partitions from a single
device.

This is addressed here by largely rewriting the shim's SCSI code and
moving it to a new internal/uvm/scsi package. The new code features a
main Manager type, which delegates to attachManager and mountManager
for tracking attachments to the VM and mounts inside the VM,
respectively. attachManager and mountManager in turn rely on a set of
interfaces for the actual backend implementation of interacting with a
VM. This will also allow the scsi package to be tested in isolation in
the future.
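
To make the new surface concrete, here is a minimal caller-side sketch
distilled from the call sites changed in the diff below. It is
illustrative only: the Manager's construction is not part of this diff,
and error handling is abbreviated.

    // Sketch of the new caller-facing flow, based on the call sites in this
    // diff; not a verbatim excerpt from the package.
    import (
        "context"

        "github.com/Microsoft/hcsshim/internal/uvm"
        "github.com/Microsoft/hcsshim/internal/uvm/scsi"
    )

    func attachAndMount(ctx context.Context, vm *uvm.UtilityVM, vhdPath string) (string, error) {
        m, err := vm.SCSIManager.AddVirtualDisk(
            ctx,
            vhdPath,                          // host path of the VHD
            false,                            // readOnly
            scsi.IndividualVMAccess(vm.ID()), // grant only this VM access to the file
            &scsi.MountConfig{},              // non-nil: attach and mount; nil attaches only
        )
        if err != nil {
            return "", err
        }
        // The scsi package now chooses the guest mount path; callers read it
        // back rather than requesting one.
        return m.GuestPath(), nil
    }

When a caller is done, the returned *scsi.Mount is cleaned up with
Release(ctx), as the internal/lcow changes below show.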

One consequence of this change is that it is no longer possible for the
caller to request a specific UVM path for a SCSI mount. Support for
this was already unreliable: if the disk was already mounted, you would
get back its existing mount path instead of the one you requested, so
the caller already had to handle that case. Additionally, I'm not aware
of any reason why the specific location where a disk is mounted is
actually relevant. For these reasons, and to simplify the overall
package interface, the mount path is now determined by the scsi
package, using a format string passed to the Manager at creation time.
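
For illustration, the removed lines below show call sites computing
their own guest paths with fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt,
vm.UVMMountCounter()). Under the new scheme the format string is
supplied once, at Manager creation. A runnable toy illustration of the
idea follows; the constant's value is an assumption mirroring
guestpath.LCOWGlobalMountPrefixFmt, and the real Manager wiring is not
shown in this excerpt.

    package main

    import "fmt"

    // Assumed LCOW mount path format string; under the new design the scsi
    // package, not the caller, picks the index.
    const lcowMountFmt = "/run/mounts/m%d"

    func main() {
        for i := 0; i < 3; i++ {
            fmt.Printf(lcowMountFmt+"\n", i) // /run/mounts/m0, /run/mounts/m1, ...
        }
    }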

Signed-off-by: Kevin Parsons <kevpar@microsoft.com>
kevpar committed Apr 24, 2023
1 parent 0c6b861 commit 8655dd2
Showing 26 changed files with 1,221 additions and 749 deletions.
14 changes: 7 additions & 7 deletions internal/devices/drivers.go
@@ -13,6 +13,7 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/resources"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
)

// InstallDriver mounts a share from the host into the UVM, installs any kernel drivers in the share,
@@ -54,19 +55,18 @@ func InstallDrivers(ctx context.Context, vm *uvm.UtilityVM, share string, gpuDri
}

// first mount driver as scsi in standard mount location
- uvmPathForShare := fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, vm.UVMMountCounter())
- mount, err := vm.AddSCSI(ctx,
+ mount, err := vm.SCSIManager.AddVirtualDisk(
+ ctx,
share,
- uvmPathForShare,
true,
- false,
- []string{},
- uvm.VMAccessTypeIndividual)
+ scsi.IndividualVMAccess(vm.ID()),
+ &scsi.MountConfig{},
+ )
if err != nil {
return closer, fmt.Errorf("failed to add SCSI disk to utility VM for path %+v: %s", share, err)
}
closer = mount
- uvmPathForShare = mount.UVMPath
+ uvmPathForShare := mount.GuestPath()

// construct path that the drivers will be remounted as read/write in the UVM

23 changes: 13 additions & 10 deletions internal/hcsoci/resources_lcow.go
@@ -20,7 +20,7 @@ import (
"github.com/Microsoft/hcsshim/internal/layers"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/resources"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
)

func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error {
@@ -85,13 +85,18 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *
l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount))
if mount.Type == "physical-disk" {
l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount")
- uvmPathForShare = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter())
- scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly, mount.Options)
+ scsiMount, err := coi.HostingSystem.SCSIManager.AddPhysicalDisk(
+ ctx,
+ hostPath,
+ readOnly,
+ scsi.IndividualVMAccess(coi.HostingSystem.ID()),
+ &scsi.MountConfig{Options: mount.Options},
+ )
if err != nil {
return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount)
}

- uvmPathForFile = scsiMount.UVMPath
+ uvmPathForFile = scsiMount.GuestPath()
r.Add(scsiMount)
coi.Spec.Mounts[i].Type = "none"
} else if mount.Type == "virtual-disk" {
@@ -100,20 +105,18 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *

// if the scsi device is already attached then we take the uvm path that the function below returns
// that is where it was previously mounted in UVM
- scsiMount, err := coi.HostingSystem.AddSCSI(
+ scsiMount, err := coi.HostingSystem.SCSIManager.AddVirtualDisk(
ctx,
hostPath,
- uvmPathForShare,
readOnly,
- false,
- mount.Options,
- uvm.VMAccessTypeIndividual,
+ scsi.IndividualVMAccess(coi.HostingSystem.ID()),
+ &scsi.MountConfig{Options: mount.Options},
)
if err != nil {
return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount)
}

- uvmPathForFile = scsiMount.UVMPath
+ uvmPathForFile = scsiMount.GuestPath()
r.Add(scsiMount)
coi.Spec.Mounts[i].Type = "none"
} else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) {
27 changes: 12 additions & 15 deletions internal/hcsoci/resources_wcow.go
@@ -25,6 +25,7 @@ import (
"github.com/Microsoft/hcsshim/internal/resources"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
"github.com/Microsoft/hcsshim/internal/wclayer"
)

@@ -59,8 +60,7 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r

if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage")
- containerRootInUVM := r.ContainerRootInUVM()
- containerRootPath, closer, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem)
+ containerRootPath, closer, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, "", coi.HostingSystem)
if err != nil {
return errors.Wrap(err, "failed to mount container storage")
}
@@ -146,7 +146,6 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R
}

if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
- uvmPath := fmt.Sprintf(guestpath.WCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter())
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
@@ -157,37 +156,35 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R
l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount))
if mount.Type == "physical-disk" || mount.Type == "virtual-disk" || mount.Type == "extensible-virtual-disk" {
var (
- scsiMount *uvm.SCSIMount
+ scsiMount *scsi.Mount
err error
)
switch mount.Type {
case "physical-disk":
l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount")
- scsiMount, err = coi.HostingSystem.AddSCSIPhysicalDisk(
+ scsiMount, err = coi.HostingSystem.SCSIManager.AddPhysicalDisk(
ctx,
mount.Source,
- uvmPath,
readOnly,
- mount.Options,
+ scsi.IndividualVMAccess(coi.HostingSystem.ID()),
+ &scsi.MountConfig{Options: mount.Options},
)
case "virtual-disk":
l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount")
- scsiMount, err = coi.HostingSystem.AddSCSI(
+ scsiMount, err = coi.HostingSystem.SCSIManager.AddVirtualDisk(
ctx,
mount.Source,
- uvmPath,
readOnly,
- false,
- mount.Options,
- uvm.VMAccessTypeIndividual,
+ scsi.IndividualVMAccess(coi.HostingSystem.ID()),
+ &scsi.MountConfig{Options: mount.Options},
)
case "extensible-virtual-disk":
l.Debug("hcsshim::allocateWindowsResource Hot-adding ExtensibleVirtualDisk")
- scsiMount, err = coi.HostingSystem.AddSCSIExtensibleVirtualDisk(
+ scsiMount, err = coi.HostingSystem.SCSIManager.AddExtensibleVirtualDisk(
ctx,
mount.Source,
- uvmPath,
readOnly,
+ &scsi.MountConfig{},
)
}
if err != nil {
@@ -197,7 +194,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R
// Compute guest mounts now, and store them, so they can be added to the container doc later.
// We do this now because we have access to the guest path through the returned mount object.
coi.windowsAdditionalMounts = append(coi.windowsAdditionalMounts, hcsschema.MappedDirectory{
- HostPath: scsiMount.UVMPath,
+ HostPath: scsiMount.GuestPath(),
ContainerPath: mount.Destination,
ReadOnly: readOnly,
})
2 changes: 1 addition & 1 deletion internal/jobcontainers/storage.go
@@ -54,7 +54,7 @@ func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *s
if s.Root.Path == "" {
log.G(ctx).Debug("mounting job container storage")
var rootPath string
- rootPath, closer, err = layers.MountWCOWLayers(ctx, containerID, s.Windows.LayerFolders, "", volumeMountPath, nil)
+ rootPath, closer, err = layers.MountWCOWLayers(ctx, containerID, s.Windows.LayerFolders, volumeMountPath, nil)
if err != nil {
return nil, fmt.Errorf("failed to mount job container storage: %w", err)
}
38 changes: 12 additions & 26 deletions internal/layers/layers.go
@@ -22,6 +22,7 @@ import (
"github.com/Microsoft/hcsshim/internal/ospath"
"github.com/Microsoft/hcsshim/internal/resources"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
"github.com/Microsoft/hcsshim/internal/wclayer"
)

@@ -107,15 +108,12 @@ func MountLCOWLayers(ctx context.Context, containerID string, layerFolders []str
}
log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD")

- var options []string
- scsiMount, err := vm.AddSCSI(
+ scsiMount, err := vm.SCSIManager.AddVirtualDisk(
ctx,
hostPath,
- containerScratchPathInUVM,
false,
- vm.ScratchEncryptionEnabled(),
- options,
- uvm.VMAccessTypeIndividual,
+ scsi.IndividualVMAccess(vm.ID()),
+ &scsi.MountConfig{Encrypted: vm.ScratchEncryptionEnabled()},
)
if err != nil {
return "", "", nil, fmt.Errorf("failed to add SCSI scratch VHD: %s", err)
@@ -124,7 +122,7 @@ func MountLCOWLayers(ctx context.Context, containerID string, layerFolders []str
// handles the case where we want to share a scratch disk for multiple containers instead
// of mounting a new one. Pass a unique value for `ScratchPath` to avoid container upper and
// work directories colliding in the UVM.
containerScratchPathInUVM = ospath.Join("linux", scsiMount.UVMPath, "scratch", containerID)
containerScratchPathInUVM = ospath.Join("linux", scsiMount.GuestPath(), "scratch", containerID)

defer func() {
if err != nil {
@@ -160,15 +158,15 @@
//
// Job container: Returns the mount path on the host as a volume guid, with the volume mounted on
// the host at `volumeMountPath`.
- func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) {
+ func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) {
if vm.OS() != "windows" {
return "", nil, errors.New("MountWCOWLayers should only be called for WCOW")
}

if vm == nil {
return mountWCOWHostLayers(ctx, layerFolders, volumeMountPath)
}
- return mountWCOWIsolatedLayers(ctx, containerID, layerFolders, guestRoot, volumeMountPath, vm)
+ return mountWCOWIsolatedLayers(ctx, containerID, layerFolders, volumeMountPath, vm)
}

type wcowHostLayersCloser struct {
@@ -305,7 +303,7 @@ func (lc *wcowIsolatedLayersCloser) Release(ctx context.Context) (retErr error)
return
}

- func mountWCOWIsolatedLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) {
+ func mountWCOWIsolatedLayers(ctx context.Context, containerID string, layerFolders []string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) {
log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::MountWCOWLayers V2 UVM")

var (
@@ -334,27 +332,17 @@ func mountWCOWIsolatedLayers(ctx context.Context, containerID string, layerFolde
layerClosers = append(layerClosers, mount)
}

- containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot)
hostPath, err := getScratchVHDPath(layerFolders)
if err != nil {
return "", nil, fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err)
}
log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD")

- var options []string
- scsiMount, err := vm.AddSCSI(
- ctx,
- hostPath,
- containerScratchPathInUVM,
- false,
- vm.ScratchEncryptionEnabled(),
- options,
- uvm.VMAccessTypeIndividual,
- )
+ scsiMount, err := vm.SCSIManager.AddVirtualDisk(ctx, hostPath, false, scsi.IndividualVMAccess(vm.ID()), &scsi.MountConfig{})
if err != nil {
return "", nil, fmt.Errorf("failed to add SCSI scratch VHD: %s", err)
}
- containerScratchPathInUVM = scsiMount.UVMPath
+ containerScratchPathInUVM := scsiMount.GuestPath()

defer func() {
if err != nil {
@@ -402,17 +390,15 @@ func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvm
}
}

options := []string{"ro"}
uvmPath = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, vm.UVMMountCounter())
sm, err := vm.AddSCSI(ctx, layerPath, uvmPath, true, false, options, uvm.VMAccessTypeNoop)
sm, err := vm.SCSIManager.AddVirtualDisk(ctx, layerPath, true, nil, &scsi.MountConfig{Options: []string{"ro"}})
if err != nil {
return "", nil, fmt.Errorf("failed to add SCSI layer: %s", err)
}
log.G(ctx).WithFields(logrus.Fields{
"layerPath": layerPath,
"layerType": "scsi",
}).Debug("Added LCOW layer")
- return sm.UVMPath, sm, nil
+ return sm.GuestPath(), sm, nil
}

// GetHCSLayers converts host paths corresponding to container layers into HCS schema V2 layers
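One detail worth noting in the addLCOWLayer hunk above: the old
uvm.VMAccessTypeNoop argument becomes a nil VM-access function, and the
read-only option moves into MountConfig. A small sketch of that pattern,
with the wrapping function assumed rather than taken from the diff:

    // Read-only layer attach, per the addLCOWLayer hunk above. Passing nil for
    // the VM-access argument appears to correspond to the old
    // uvm.VMAccessTypeNoop, i.e. no file-access grant is performed.
    func addLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (string, error) {
        m, err := vm.SCSIManager.AddVirtualDisk(ctx, layerPath, true, nil, &scsi.MountConfig{Options: []string{"ro"}})
        if err != nil {
            return "", fmt.Errorf("failed to add SCSI layer: %w", err)
        }
        return m.GuestPath(), nil
    }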
7 changes: 4 additions & 3 deletions internal/lcow/disk.go
@@ -9,6 +9,7 @@ import (

"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
"github.com/sirupsen/logrus"
)

@@ -30,8 +31,8 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er
"dest": destPath,
}).Debug("lcow::FormatDisk opts")

- var options []string
- scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false, options) // No destination as not formatted
+ // Attach without mounting.
+ scsi, err := lcowUVM.SCSIManager.AddPhysicalDisk(ctx, destPath, false, scsi.IndividualVMAccess(lcowUVM.ID()), nil)
if err != nil {
return err
}
@@ -46,7 +47,7 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er
"lun": scsi.LUN,
}).Debug("lcow::FormatDisk device attached")

- if err := formatDiskUvm(ctx, lcowUVM, scsi.Controller, scsi.LUN, destPath); err != nil {
+ if err := formatDiskUvm(ctx, lcowUVM, int(scsi.Controller()), int32(scsi.LUN()), destPath); err != nil {
return err
}
log.G(ctx).WithField("dest", destPath).Debug("lcow::FormatDisk complete")
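The FormatDisk hunk above also demonstrates the attach-without-mount
case: a nil *scsi.MountConfig attaches the disk to the VM but skips the
guest mount, leaving the device addressable by controller and LUN. A
minimal sketch, with the surrounding function assumed:

    // Attach-only sketch based on the FormatDisk hunk above: a nil MountConfig
    // attaches the disk without mounting a filesystem in the guest.
    func attachRaw(ctx context.Context, vm *uvm.UtilityVM, diskPath string) error {
        dev, err := vm.SCSIManager.AddPhysicalDisk(ctx, diskPath, false, scsi.IndividualVMAccess(vm.ID()), nil)
        if err != nil {
            return err
        }
        defer func() { _ = dev.Release(ctx) }() // hot-remove when done
        // Inside the guest, the raw device is located by controller/LUN, as
        // CreateScratch does below via /sys/bus/scsi/devices.
        sysPath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", dev.Controller(), dev.LUN())
        _ = sysPath
        return nil
    }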
20 changes: 9 additions & 11 deletions internal/lcow/scratch.go
@@ -16,6 +16,7 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvm/scsi"
"github.com/sirupsen/logrus"
)

@@ -68,34 +69,31 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string,
return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
}

- var options []string
- scsi, err := lcowUVM.AddSCSI(
+ scsi, err := lcowUVM.SCSIManager.AddVirtualDisk(
ctx,
destFile,
"", // No destination as not formatted
false,
- lcowUVM.ScratchEncryptionEnabled(),
- options,
- uvm.VMAccessTypeIndividual,
+ scsi.IndividualVMAccess(lcowUVM.ID()),
+ nil, // Attach without mounting.
)
if err != nil {
return err
}
removeSCSI := true
defer func() {
if removeSCSI {
- _ = lcowUVM.RemoveSCSI(ctx, destFile)
+ scsi.Release(ctx)
}
}()

log.G(ctx).WithFields(logrus.Fields{
"dest": destFile,
"controller": scsi.Controller,
"lun": scsi.LUN,
"controller": scsi.Controller(),
"lun": scsi.LUN(),
}).Debug("lcow::CreateScratch device attached")

// Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory
- devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", scsi.Controller, scsi.LUN)
+ devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", scsi.Controller(), scsi.LUN())
testdCtx, cancel := context.WithTimeout(ctx, timeout.TestDRetryLoop)
defer cancel()
for {
@@ -138,7 +136,7 @@

// Hot-Remove before we copy it
removeSCSI = false
- if err := lcowUVM.RemoveSCSI(ctx, destFile); err != nil {
+ if err := scsi.Release(ctx); err != nil {
return fmt.Errorf("failed to hot-remove: %s", err)
}
