From 46562822f9408a6cbf86512fe849a88c1ff67903 Mon Sep 17 00:00:00 2001 From: ambarve <57150885+ambarve@users.noreply.github.com> Date: Mon, 28 Mar 2022 16:04:53 -0700 Subject: [PATCH] Support for multiple SCSI controllers (#1328) * Support for multiple SCSI controllers Enable using up to 4 SCSI controllers for LCOW UVMs. HCS currently doesn't respect the SCSI controller number provided with the Add SCSI disk requests. Hence, the SCSI disk can show up at some different controller inside the LCOW UVM. To avoid this, now we use GUIDs to represent each controller and use that GUID with the Add SCSI disk request. GCS code is also modified to identify the controller number from the controller GUID. Now if an LCOW pod is created with an annotation that sets VPMEM device count to 0, we will automatically enable 4 SCSI controllers. Even the rootfs.vhd will be attached via SCSI in that scenario. Signed-off-by: Amit Barve --- guest/storage/scsi/scsi.go | 107 +++++++++++++++++++++++++--- guest/storage/scsi/scsi_test.go | 38 +++++----- protocol/guestrequest/types.go | 13 ++++ protocol/guestrequest/types_test.go | 15 ++++ uvm/constants.go | 5 ++ uvm/create.go | 15 ++-- uvm/create_lcow.go | 107 ++++++++++++++++------------ uvm/create_wcow.go | 27 +++---- uvm/scsi.go | 16 ++--- 9 files changed, 239 insertions(+), 104 deletions(-) create mode 100644 protocol/guestrequest/types_test.go diff --git a/guest/storage/scsi/scsi.go b/guest/storage/scsi/scsi.go index f5231dfb9d..7af2db325d 100644 --- a/guest/storage/scsi/scsi.go +++ b/guest/storage/scsi/scsi.go @@ -8,7 +8,10 @@ import ( "fmt" "io/ioutil" "os" + "path" "path/filepath" + "strconv" + "strings" "time" "github.com/pkg/errors" @@ -20,6 +23,7 @@ import ( dm "github.com/Microsoft/hcsshim/internal/guest/storage/devicemapper" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" 
"github.com/Microsoft/hcsshim/pkg/securitypolicy" ) @@ -39,11 +43,45 @@ var ( ) const ( - scsiDevicesPath = "/sys/bus/scsi/devices" - verityDeviceFmt = "verity-scsi-contr%d-lun%d-%s" + scsiDevicesPath = "/sys/bus/scsi/devices" + vmbusDevicesPath = "/sys/bus/vmbus/devices" + verityDeviceFmt = "verity-scsi-contr%d-lun%d-%s" ) -// Mount creates a mount from the SCSI device on `controller` index `lun` to +// fetchActualControllerNumber retrieves the actual controller number assigned to a SCSI controller +// with number `passedController`. +// When HCS creates the UVM it adds 4 SCSI controllers to the UVM but the 1st SCSI +// controller according to HCS can actually show up as 2nd, 3rd or 4th controller inside +// the UVM. So the i'th controller from HCS' perspective could actually be j'th controller +// inside the UVM. However, we can refer to the SCSI controllers with their GUIDs (that +// are hardcoded) and then using that GUID find out the SCSI controller number inside the +// guest. This function does exactly that. +func fetchActualControllerNumber(ctx context.Context, passedController uint8) (uint8, error) { + // find the controller number by looking for a file named host<N> (e.g. host1, host3 etc.) + // `N` is the controller number. + // Full file path would be /sys/bus/vmbus/devices/<controller-guid>/host<N>. 
+ controllerDirPath := path.Join(vmbusDevicesPath, guestrequest.ScsiControllerGuids[passedController]) + entries, err := ioutil.ReadDir(controllerDirPath) + if err != nil { + return 0, err + } + + for _, entry := range entries { + baseName := path.Base(entry.Name()) + if !strings.HasPrefix(baseName, "host") { + continue + } + controllerStr := baseName[len("host"):] + controllerNum, err := strconv.ParseUint(controllerStr, 10, 8) + if err != nil { + return 0, fmt.Errorf("failed to parse controller number from %s: %w", baseName, err) + } + return uint8(controllerNum), nil + } + return 0, fmt.Errorf("host directory not found inside %s", controllerDirPath) +} + +// mount creates a mount from the SCSI device on `controller` index `lun` to // `target` // // `target` will be created. On mount failure the created `target` will be @@ -51,7 +89,7 @@ const ( // // If `encrypted` is set to true, the SCSI device will be encrypted using // dm-crypt. -func Mount( +func mount( ctx context.Context, controller, lun uint8, @@ -159,10 +197,30 @@ func Mount( return nil } -// Unmount unmounts a SCSI device mounted at `target`. +// Mount is just a wrapper over actual mount call. This wrapper finds out the controller +// number from the controller GUID string and calls mount. +func Mount( + ctx context.Context, + controller, + lun uint8, + target string, + readonly bool, + encrypted bool, + options []string, + verityInfo *guestresource.DeviceVerityInfo, + securityPolicy securitypolicy.SecurityPolicyEnforcer, +) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return mount(ctx, cNum, lun, target, readonly, encrypted, options, verityInfo, securityPolicy) +} + +// unmount unmounts a SCSI device mounted at `target`. // // If `encrypted` is true, it removes all its associated dm-crypto state. 
-func Unmount( +func unmount( ctx context.Context, controller, lun uint8, @@ -206,6 +264,24 @@ func Unmount( return nil } +// Unmount is just a wrapper over actual unmount call. This wrapper finds out the controller +// number from the controller GUID string and calls unmount. +func Unmount( + ctx context.Context, + controller, + lun uint8, + target string, + encrypted bool, + verityInfo *guestresource.DeviceVerityInfo, + securityPolicy securitypolicy.SecurityPolicyEnforcer, +) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return unmount(ctx, cNum, lun, target, encrypted, verityInfo, securityPolicy) +} + // ControllerLunToName finds the `/dev/sd*` path to the SCSI device on // `controller` index `lun`. func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, err error) { @@ -217,8 +293,7 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, trace.Int64Attribute("controller", int64(controller)), trace.Int64Attribute("lun", int64(lun))) - scsiID := fmt.Sprintf("0:0:%d:%d", controller, lun) - + scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) // Devices matching the given SCSI code should each have a subdirectory // under /sys/bus/scsi/devices/<scsiID>/block. blockPath := filepath.Join(scsiDevicesPath, scsiID, "block") @@ -249,11 +324,11 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, return devicePath, nil } -// UnplugDevice finds the SCSI device on `controller` index `lun` and issues a +// unplugDevice finds the SCSI device on `controller` index `lun` and issues a // guest initiated unplug. // // If the device is not attached returns no error. 
-func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { +func unplugDevice(ctx context.Context, controller, lun uint8) (err error) { _, span := trace.StartSpan(ctx, "scsi::UnplugDevice") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() @@ -262,7 +337,7 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { trace.Int64Attribute("controller", int64(controller)), trace.Int64Attribute("lun", int64(lun))) - scsiID := fmt.Sprintf("0:0:%d:%d", controller, lun) + scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -277,3 +352,13 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { } return nil } + +// UnplugDevice is just a wrapper over actual unplugDevice call. This wrapper finds out the controller +// number from the controller GUID string and calls unplugDevice. +func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { + cNum, err := fetchActualControllerNumber(ctx, controller) + if err != nil { + return err + } + return unplugDevice(ctx, cNum, lun) +} diff --git a/guest/storage/scsi/scsi_test.go b/guest/storage/scsi/scsi_test.go index 4ac33ebcb4..c4cb2f55ae 100644 --- a/guest/storage/scsi/scsi_test.go +++ b/guest/storage/scsi/scsi_test.go @@ -37,7 +37,7 @@ func Test_Mount_Mkdir_Fails_Error(t *testing.T) { return "", nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -74,7 +74,7 @@ func Test_Mount_Mkdir_ExpectedPath(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -111,7 +111,7 @@ func Test_Mount_Mkdir_ExpectedPerm(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -148,7 +148,7 @@ func Test_Mount_ControllerLunToName_Valid_Controller(t *testing.T) { return nil } - if err := Mount( + if err := mount( 
context.Background(), expectedController, 0, @@ -185,7 +185,7 @@ func Test_Mount_ControllerLunToName_Valid_Lun(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, expectedLun, @@ -225,7 +225,7 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) { return expectedErr } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -263,7 +263,7 @@ func Test_Mount_Valid_Source(t *testing.T) { } return nil } - err := Mount(context.Background(), 0, 0, "/fake/path", false, false, nil, nil, openDoorSecurityPolicyEnforcer()) + err := mount(context.Background(), 0, 0, "/fake/path", false, false, nil, nil, openDoorSecurityPolicyEnforcer()) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -290,7 +290,7 @@ func Test_Mount_Valid_Target(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -326,7 +326,7 @@ func Test_Mount_Valid_FSType(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -362,7 +362,7 @@ func Test_Mount_Valid_Flags(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -398,7 +398,7 @@ func Test_Mount_Readonly_Valid_Flags(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -433,7 +433,7 @@ func Test_Mount_Valid_Data(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -469,7 +469,7 @@ func Test_Mount_Readonly_Valid_Data(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -506,7 +506,7 @@ func Test_Read_Only_Security_Policy_Enforcement_Mount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: 
%v", err) } @@ -549,7 +549,7 @@ func Test_Read_Write_Security_Policy_Enforcement_Mount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, false, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, false, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -592,12 +592,12 @@ func Test_Security_Policy_Enforcement_Unmount_Calls(t *testing.T) { } enforcer := mountMonitoringSecurityPolicyEnforcer() - err := Mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) + err := mount(context.Background(), 0, 0, target, true, false, nil, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } - err = Unmount(context.Background(), 0, 0, target, false, nil, enforcer) + err = unmount(context.Background(), 0, 0, target, false, nil, enforcer) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -668,7 +668,7 @@ func Test_CreateVerityTarget_And_Mount_Called_With_Correct_Parameters(t *testing return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, @@ -717,7 +717,7 @@ func Test_osMkdirAllFails_And_RemoveDevice_Called(t *testing.T) { return nil } - if err := Mount( + if err := mount( context.Background(), 0, 0, diff --git a/protocol/guestrequest/types.go b/protocol/guestrequest/types.go index 5c3d7111d4..d8d0c20b10 100644 --- a/protocol/guestrequest/types.go +++ b/protocol/guestrequest/types.go @@ -41,3 +41,16 @@ type RS4NetworkModifyRequest struct { RequestType RequestType `json:"RequestType,omitempty"` Settings interface{} `json:"Settings,omitempty"` } + +var ( + // V5 GUIDs for SCSI controllers + // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" + // and index as names. 
For example, first GUID is created like this: + // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) + ScsiControllerGuids = []string{ + "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", + "0110f83b-de10-5172-a266-78bca56bf50a", + "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", + "305891a9-b251-5dfe-91a2-c25d9212275b", + } +) diff --git a/protocol/guestrequest/types_test.go b/protocol/guestrequest/types_test.go new file mode 100644 index 0000000000..52b5807c48 --- /dev/null +++ b/protocol/guestrequest/types_test.go @@ -0,0 +1,15 @@ +package guestrequest + +import ( + "github.com/Microsoft/go-winio/pkg/guid" + "testing" +) + +func TestGuidValidity(t *testing.T) { + for _, g := range ScsiControllerGuids { + _, err := guid.FromString(g) + if err != nil { + t.Fatalf("GUID parsing failed: %s", err) + } + } +} diff --git a/uvm/constants.go b/uvm/constants.go index 1ddcf903ab..4bd84f26d2 100644 --- a/uvm/constants.go +++ b/uvm/constants.go @@ -2,6 +2,8 @@ package uvm import ( "errors" + + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" ) const ( @@ -21,4 +23,7 @@ const ( var ( errNotSupported = errors.New("not supported") errBadUVMOpts = errors.New("UVM options incorrect") + + // Maximum number of SCSI controllers allowed + MaxSCSIControllers = uint32(len(guestrequest.ScsiControllerGuids)) ) diff --git a/uvm/create.go b/uvm/create.go index a3d9fcd4e6..1a08bae535 100644 --- a/uvm/create.go +++ b/uvm/create.go @@ -94,6 +94,9 @@ type Options struct { // NoWritableFileShares disables adding any writable vSMB and Plan9 shares to the UVM NoWritableFileShares bool + + // The number of SCSI controllers. 
Defaults to 1 for WCOW and 4 for LCOW + SCSIControllerCount uint32 } // compares the create opts used during template creation with the create opts @@ -131,8 +134,8 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.EnableDeferredCommit && !opts.AllowOvercommit { return errors.New("EnableDeferredCommit is not supported on physically backed VMs") } - if opts.SCSIControllerCount > 1 { - return errors.New("SCSI controller count must be 0 or 1") // Future extension here for up to 4 + if opts.SCSIControllerCount > MaxSCSIControllers { + return fmt.Errorf("SCSI controller count can't be more than %d", MaxSCSIControllers) } if opts.VPMemDeviceCount > MaxVPMEMCount { return fmt.Errorf("VPMem device count cannot be greater than %d", MaxVPMEMCount) @@ -141,10 +144,6 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.VPMemSizeBytes%4096 != 0 { return errors.New("VPMemSizeBytes must be a multiple of 4096") } - } else { - if opts.PreferredRootFSType == PreferredRootFSTypeVHD { - return errors.New("PreferredRootFSTypeVHD requires at least one VPMem device") - } } if opts.KernelDirect && osversion.Build() < 18286 { return errors.New("KernelDirectBoot is not supported on builds older than 18286") @@ -160,6 +159,9 @@ func verifyOptions(ctx context.Context, options interface{}) error { if len(opts.LayerFolders) < 2 { return errors.New("at least 2 LayerFolders must be supplied") } + if opts.SCSIControllerCount != 1 { + return errors.New("exactly 1 SCSI controller is required for WCOW") + } if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { return errors.New("clone configuration doesn't match with template configuration") } @@ -188,6 +190,7 @@ func newDefaultOptions(id, owner string) *Options { ProcessorCount: defaultProcessorCount(), FullyPhysicallyBacked: false, NoWritableFileShares: false, + SCSIControllerCount: 1, } if opts.Owner == "" { diff --git a/uvm/create_lcow.go b/uvm/create_lcow.go 
index 8df0e743e0..ada420cca2 100644 --- a/uvm/create_lcow.go +++ b/uvm/create_lcow.go @@ -23,6 +23,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/osversion" ) @@ -86,7 +87,6 @@ type OptionsLCOW struct { KernelBootOptions string // Additional boot options for the kernel EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe - SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1. UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true ExecCommandLine string // The command line to exec from init. Defaults to GCS ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false @@ -137,7 +137,6 @@ func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { KernelBootOptions: "", EnableGraphicsConsole: false, ConsolePipe: "", - SCSIControllerCount: 1, UseGuestConnection: true, ExecCommandLine: fmt.Sprintf("/bin/gcs -v4 -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), ForwardStdout: false, @@ -352,11 +351,11 @@ func makeLCOWVMGSDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. 
- doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } @@ -537,13 +536,14 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs } if uvm.scsiControllerCount > 0 { - // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ Attachments: make(map[string]hcsschema.Attachment), - }, + } } } + if uvm.vpmemMaxCount > 0 { doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ MaximumCount: uvm.vpmemMaxCount, @@ -558,48 +558,59 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs kernelArgs = "initrd=/" + opts.RootFSFile } case PreferredRootFSTypeVHD: - // Support for VPMem VHD(X) booting rather than initrd.. - kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" - imageFormat := "Vhd1" - if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { - imageFormat = "Vhdx" - } - doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ - "0": { - HostPath: rootfsFullPath, - ReadOnly: true, - ImageFormat: imageFormat, - }, - } - if uvm.vpmemMultiMapping { - pmem := newPackedVPMemDevice() - pmem.maxMappedDeviceCount = 1 - - st, err := os.Stat(rootfsFullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) + if uvm.vpmemMaxCount > 0 { + // Support for VPMem VHD(X) booting rather than initrd.. 
+ kernelArgs = "root=/dev/pmem0 ro rootwait init=/init" + imageFormat := "Vhd1" + if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { + imageFormat = "Vhdx" } - devSize := pageAlign(uint64(st.Size())) - memReg, err := pmem.Allocate(devSize) - if err != nil { - return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ + "0": { + HostPath: rootfsFullPath, + ReadOnly: true, + ImageFormat: imageFormat, + }, } - defer func() { + if uvm.vpmemMultiMapping { + pmem := newPackedVPMemDevice() + pmem.maxMappedDeviceCount = 1 + + st, err := os.Stat(rootfsFullPath) if err != nil { - if err = pmem.Release(memReg); err != nil { - log.G(ctx).WithError(err).Debug("failed to release memory region") - } + return nil, errors.Wrapf(err, "failed to stat rootfs: %q", rootfsFullPath) } - }() + devSize := pageAlign(uint64(st.Size())) + memReg, err := pmem.Allocate(devSize) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate memory for rootfs") + } + defer func() { + if err != nil { + if err = pmem.Release(memReg); err != nil { + log.G(ctx).WithError(err).Debug("failed to release memory region") + } + } + }() - dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) - if err := pmem.mapVHDLayer(ctx, dev); err != nil { - return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) + if err := pmem.mapVHDLayer(ctx, dev); err != nil { + return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + } + uvm.vpmemDevicesMultiMapped[0] = pmem + } else { + dev := newDefaultVPMemInfo(opts.RootFSFile, "/") + uvm.vpmemDevicesDefault[0] = dev } - uvm.vpmemDevicesMultiMapped[0] = pmem } else { - dev := newDefaultVPMemInfo(opts.RootFSFile, "/") - uvm.vpmemDevicesDefault[0] = dev + kernelArgs = "root=/dev/sda ro rootwait 
init=/init" + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + Type_: "VirtualDisk", + Path: rootfsFullPath, + ReadOnly: true, + } + uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false) + } } @@ -743,6 +754,12 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error } }() + // vpmemMaxCount has been set to 0 which means we are going to need multiple SCSI controllers + // to support lots of layers. + if osversion.Build() >= osversion.RS5 && uvm.vpmemMaxCount == 0 { + uvm.scsiControllerCount = 4 + } + if err = verifyOptions(ctx, opts); err != nil { return nil, errors.Wrap(err, errBadUVMOpts.Error()) } diff --git a/uvm/create_wcow.go b/uvm/create_wcow.go index 1d083ffda2..4a92fc962d 100644 --- a/uvm/create_wcow.go +++ b/uvm/create_wcow.go @@ -17,6 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/processorinfo" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvmfolder" "github.com/Microsoft/hcsshim/internal/wclayer" @@ -249,7 +250,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error id: opts.ID, owner: opts.Owner, operatingSystem: "windows", - scsiControllerCount: 1, + scsiControllerCount: opts.SCSIControllerCount, vsmbDirShares: make(map[string]*VSMBShare), vsmbFileShares: make(map[string]*VSMBShare), vpciDevices: make(map[VPCIDeviceKey]*VPCIDevice), @@ -310,21 +311,23 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } } - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ - "0": { - Attachments: map[string]hcsschema.Attachment{ - "0": { - Path: scratchPath, - Type_: "VirtualDisk", - }, - }, - }, + doc.VirtualMachine.Devices.Scsi = 
map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ + Attachments: make(map[string]hcsschema.Attachment), + } + } + + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + + Path: scratchPath, + Type_: "VirtualDisk", } uvm.scsiLocations[0][0] = newSCSIMount(uvm, - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Path, "", - doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Type_, + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Type_, "", 1, 0, diff --git a/uvm/scsi.go b/uvm/scsi.go index 5cac727387..78d7516ffa 100644 --- a/uvm/scsi.go +++ b/uvm/scsi.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" "github.com/Microsoft/go-winio/pkg/security" @@ -169,8 +168,8 @@ func newSCSIMount( // SCSI controllers associated with a utility VM to use. 
// Lock must be held when calling this function func (uvm *UtilityVM) allocateSCSISlot(ctx context.Context) (int, int, error) { - for controller, luns := range uvm.scsiLocations { - for lun, sm := range luns { + for controller := 0; controller < int(uvm.scsiControllerCount); controller++ { + for lun, sm := range uvm.scsiLocations[controller] { // If sm is nil, we have found an open slot so we allocate a new SCSIMount if sm == nil { return controller, lun, nil @@ -224,7 +223,7 @@ func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { scsiModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } var verity *guestresource.DeviceVerityInfo @@ -408,11 +407,6 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) return nil, ErrNoSCSIControllers } - // Note: Can remove this check post-RS5 if multiple controllers are supported - if sm.Controller > 0 { - return nil, ErrTooManyAttachments - } - SCSIModification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeAdd, Settings: hcsschema.Attachment{ @@ -421,7 +415,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) ReadOnly: addReq.readOnly, ExtensibleVirtualDiskType: addReq.evdType, }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, strconv.Itoa(sm.Controller), sm.LUN), + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), } if sm.UVMPath != "" { @@ -637,7 +631,7 @@ func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) er dstVhdPath string = sm.HostPath err error dir string - conStr string = fmt.Sprintf("%d", sm.Controller) + conStr string = 
guestrequest.ScsiControllerGuids[sm.Controller] lunStr string = fmt.Sprintf("%d", sm.LUN) )