Add volume mount support for job containers
This adds basic directory mount support for job containers. As any path on the host
is already accessible from the container, the concept of volume mounts is a bit funny
for job containers. However, it still makes sense to treat the mount point of the
container image's volume as the location where most container-related content should be found.

This is done by appending the requested container mount path to the host path where the
rootfs volume is mounted, and then creating a symlink at that location pointing to the host
directory (see the sketch after the examples below).

So:
Container rootfs volume path = "C:\C\123456789abcdefgh\"

Example #1
--------------
{
    "host_path": "C:\mydir",
    "container_path": "\dir\in\container"
}

"C:\C\123456789abcdefgh\dir\in\container" becomes a symlink to "C:\mydir"

Example #2
---------------
Drive letters will be stripped:
{
    "host_path": "C:\mydir",
    "container_path": "C:\dir\in\container"
}
"C:\C\123456789abcdefgh\dir\in\container" becomes a symlink to "C:\mydir"

Signed-off-by: Daniel Canter <dcanter@microsoft.com>
dcantah committed Jul 1, 2021
1 parent 43d161b commit d396770
Showing 3 changed files with 148 additions and 9 deletions.
6 changes: 5 additions & 1 deletion internal/jobcontainers/jobcontainer.go
@@ -139,6 +139,10 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ *
layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, sandboxPath, false)
r.SetLayers(layers)

if err := setupMounts(s, container.sandboxMount); err != nil {
return nil, nil, err
}

volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil {
return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path)
@@ -524,7 +528,7 @@ func systemProcessInformation() ([]*winapi.SYSTEM_PROCESS_INFORMATION, error) {
var (
systemProcInfo *winapi.SYSTEM_PROCESS_INFORMATION
procInfos []*winapi.SYSTEM_PROCESS_INFORMATION
// This happens to be the buffer size hcs uses but there's no really no hard need to keep it
// This happens to be the buffer size hcs uses but there's really no hard need to keep it
// the same, it's just a sane default.
size = uint32(1024 * 512)
bounds uintptr
58 changes: 58 additions & 0 deletions internal/jobcontainers/mounts.go
@@ -0,0 +1,58 @@
package jobcontainers

import (
"fmt"
"os"
"path/filepath"
"strings"

specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)

// namedPipePath returns true if the given path is to a named pipe.
func namedPipePath(p string) bool {
return strings.HasPrefix(p, `\\.\pipe\`)
}

// Strip the drive letter (if there is one) so we don't end up with "%CONTAINER_SANDBOX_MOUNT_POINT%"\C:\path\to\mount
func stripDriveLetter(name string) string {
// Remove drive letter
if len(name) == 2 && name[1] == ':' {
name = "."
} else if len(name) > 2 && name[1] == ':' {
name = name[2:]
}
return name
}

// setupMounts adds the custom mounts requested in the OCI runtime spec. Mounts are a bit funny as you already have
// access to everything on the host, so just symlink in whatever was requested to the path where the container volume
// is mounted. At least then the mount can be accessed from a path relative to the default working directory/where the volume
// is.
func setupMounts(spec *specs.Spec, sandboxVolumePath string) error {
for _, mount := range spec.Mounts {
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both a source and a destination: %+v", mount)
}

if namedPipePath(mount.Source) {
return errors.New("named pipe mounts not supported for job containers - interact with the pipe directly")
}

fullCtrPath := filepath.Join(sandboxVolumePath, stripDriveLetter(mount.Destination))
// Take off the last element in the path so we don't get "file already exists" when we go to symlink.
strippedCtrPath := filepath.Dir(fullCtrPath)
if _, err := os.Stat(strippedCtrPath); os.IsNotExist(err) {
if err := os.MkdirAll(strippedCtrPath, 0777); err != nil {
return errors.Wrap(err, "failed to make directory for job container mount")
}
}

if err := os.Symlink(mount.Source, fullCtrPath); err != nil {
return errors.Wrap(err, "failed to setup mount for job container")
}
}

return nil
}
93 changes: 85 additions & 8 deletions test/cri-containerd/jobcontainer_test.go
@@ -33,7 +33,7 @@ func getJobContainerPodRequestWCOW(t *testing.T) *runtime.RunPodSandboxRequest {
}
}

func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string) *runtime.CreateContainerRequest {
func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string, mounts []*runtime.Mount) *runtime.CreateContainerRequest {
return &runtime.CreateContainerRequest{
Config: &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
@@ -49,11 +49,12 @@ func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.P
"-t",
"127.0.0.1",
},

Mounts: mounts,
Annotations: map[string]string{
"microsoft.com/hostprocess-container": "true",
"microsoft.com/hostprocess-inherit-user": "true",
},
Windows: &runtime.WindowsContainerConfig{},
},
PodSandboxId: podID,
SandboxConfig: podConfig,
@@ -74,7 +75,7 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -113,7 +114,7 @@ func Test_RunContainer_Hostname_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -146,7 +147,7 @@ func Test_RunContainer_HNS_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -193,7 +194,7 @@ func Test_RunContainer_VHD_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -244,7 +245,7 @@ func Test_RunContainer_ETW_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -299,7 +300,7 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

@@ -326,3 +327,79 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
t.Fatalf("expected volumes to be the same within job process container. got %q but expected %q", hostStdout, containerStdout)
}
}

func Test_RunContainer_LocalService(t *testing.T) {
requireFeatures(t, featureWCOWProcess, featureHostProcess)

pullRequiredImages(t, []string{imageWindowsNanoserver})
client := newTestRuntimeClient(t)

podctx := context.Background()
sandboxRequest := getJobContainerPodRequestWCOW(t)

podID := runPodSandbox(t, client, podctx, sandboxRequest)
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

containerRequest.Config.Annotations["microsoft.com/hostprocess-inherit-user"] = "false"
containerRequest.Config.Windows.SecurityContext.RunAsUsername = "nt authority\\LocalService"
containerID := createContainer(t, client, ctx, containerRequest)
defer removeContainer(t, client, ctx, containerID)
startContainer(t, client, ctx, containerID)
defer stopContainer(t, client, ctx, containerID)
}

func Test_RunContainer_VolumeMount(t *testing.T) {
requireFeatures(t, featureWCOWProcess, featureHostProcess)

pullRequiredImages(t, []string{imageWindowsNanoserver})
client := newTestRuntimeClient(t)

podctx := context.Background()
sandboxRequest := getJobContainerPodRequestWCOW(t)

podID := runPodSandbox(t, client, podctx, sandboxRequest)
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

dir, err := ioutil.TempDir("", "example")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

tmpfn := filepath.Join(dir, "tmpfile")
_, err = os.Create(tmpfn)
if err != nil {
t.Fatal(err)
}

mounts := []*runtime.Mount{
{
HostPath: dir,
ContainerPath: "/path/in/container",
},
}

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, mounts)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

containerID := createContainer(t, client, ctx, containerRequest)
defer removeContainer(t, client, ctx, containerID)
startContainer(t, client, ctx, containerID)
defer stopContainer(t, client, ctx, containerID)

// Check that the mount is under the container's volume
r := execSync(t, client, ctx, &runtime.ExecSyncRequest{
ContainerId: containerID,
Cmd: []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"},
})
if r.ExitCode != 0 {
t.Fatalf("failed with exit code %d checking for job container mount: %s", r.ExitCode, string(r.Stderr))
}
}
