Skip to content

Commit

Permalink
Merge pull request #1057 from dcantah/jobcontainer-volume
Browse files Browse the repository at this point in the history
Add volume mount support for job containers
  • Loading branch information
dcantah authored Jul 15, 2021
2 parents 837c300 + 90a193c commit 40d9010
Show file tree
Hide file tree
Showing 4 changed files with 208 additions and 9 deletions.
6 changes: 5 additions & 1 deletion internal/jobcontainers/jobcontainer.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ *
layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, sandboxPath, false)
r.SetLayers(layers)

if err := setupMounts(s, container.sandboxMount); err != nil {
return nil, nil, err
}

volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil {
return nil, nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, s.Root.Path)
Expand Down Expand Up @@ -524,7 +528,7 @@ func systemProcessInformation() ([]*winapi.SYSTEM_PROCESS_INFORMATION, error) {
var (
systemProcInfo *winapi.SYSTEM_PROCESS_INFORMATION
procInfos []*winapi.SYSTEM_PROCESS_INFORMATION
// This happens to be the buffer size hcs uses but there's no really no hard need to keep it
// This happens to be the buffer size hcs uses but there's really no hard need to keep it
// the same, it's just a sane default.
size = uint32(1024 * 512)
bounds uintptr
Expand Down
56 changes: 56 additions & 0 deletions internal/jobcontainers/mounts.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package jobcontainers

import (
"fmt"
"os"
"path/filepath"
"strings"

specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)

// isnamedPipePath returns true if the given path is to a named pipe.
func isnamedPipePath(p string) bool {
	return strings.HasPrefix(p, `\\.\pipe\`)
}

// stripDriveLetter removes a leading drive letter (e.g. "C:") from name so the
// result can be joined under the sandbox volume without producing something
// like "%CONTAINER_SANDBOX_MOUNT_POINT%"\C:\path\to\mount.
func stripDriveLetter(name string) string {
	// No drive letter present — return the path untouched.
	if len(name) < 2 || name[1] != ':' {
		return name
	}
	if rest := name[2:]; rest != "" {
		return rest
	}
	// A bare drive letter ("C:") maps to the current directory.
	return "."
}

// setupMounts adds the custom mounts requested in the OCI runtime spec. Mounts are a bit funny as you already have
// access to everything on the host, so just symlink in whatever was requested to the path where the container volume
// is mounted. At least then the mount can be accessed from a path relative to the default working directory/where the volume
// is.
//
// Named pipe mounts are rejected: the process can interact with the pipe directly on the host.
func setupMounts(spec *specs.Spec, sandboxVolumePath string) error {
	for _, mount := range spec.Mounts {
		if mount.Destination == "" || mount.Source == "" {
			return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
		}

		if isnamedPipePath(mount.Source) {
			return errors.New("named pipe mounts not supported for job containers - interact with the pipe directly")
		}

		// Place the destination under the sandbox volume, minus any drive letter,
		// so the mount is reachable relative to the container's mount point.
		fullCtrPath := filepath.Join(sandboxVolumePath, stripDriveLetter(mount.Destination))
		// Make sure all of the dirs leading up to the full path exist.
		strippedCtrPath := filepath.Dir(fullCtrPath)
		if err := os.MkdirAll(strippedCtrPath, 0777); err != nil {
			// Include the destination so a failure among several mounts is attributable.
			return errors.Wrapf(err, "failed to make directory for job container mount %s", mount.Destination)
		}

		if err := os.Symlink(mount.Source, fullCtrPath); err != nil {
			return errors.Wrapf(err, "failed to setup mount %s for job container", mount.Destination)
		}
	}

	return nil
}
21 changes: 21 additions & 0 deletions internal/jobcontainers/mounts_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
package jobcontainers

import (
"testing"

specs "github.com/opencontainers/runtime-spec/specs-go"
)

// TestNamePipeDeny checks that a named pipe source is rejected by setupMounts,
// since job containers can talk to host pipes directly.
func TestNamePipeDeny(t *testing.T) {
	spec := &specs.Spec{
		Mounts: []specs.Mount{
			{
				Source:      `\\.\pipe\dummy\path`,
				Destination: "/path/in/container",
			},
		},
	}
	err := setupMounts(spec, "/test")
	if err == nil {
		t.Fatal("expected named pipe mount validation to fail for job container")
	}
}
134 changes: 126 additions & 8 deletions test/cri-containerd/jobcontainer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func getJobContainerPodRequestWCOW(t *testing.T) *runtime.RunPodSandboxRequest {
}
}

func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string) *runtime.CreateContainerRequest {
func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.PodSandboxConfig, image string, mounts []*runtime.Mount) *runtime.CreateContainerRequest {
return &runtime.CreateContainerRequest{
Config: &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Expand All @@ -50,11 +50,12 @@ func getJobContainerRequestWCOW(t *testing.T, podID string, podConfig *runtime.P
"-t",
"127.0.0.1",
},

Mounts: mounts,
Annotations: map[string]string{
oci.AnnotationHostProcessContainer: "true",
oci.AnnotationHostProcessInheritUser: "true",
},
Windows: &runtime.WindowsContainerConfig{},
},
PodSandboxId: podID,
SandboxConfig: podConfig,
Expand All @@ -75,7 +76,7 @@ func Test_RunContainer_InheritUser_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -114,7 +115,7 @@ func Test_RunContainer_Hostname_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -147,7 +148,7 @@ func Test_RunContainer_HNS_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerHNS, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -194,7 +195,7 @@ func Test_RunContainer_VHD_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerVHD, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -245,7 +246,7 @@ func Test_RunContainer_ETW_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageJobContainerETW, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand Down Expand Up @@ -300,7 +301,7 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
defer removePodSandbox(t, client, podctx, podID)
defer stopPodSandbox(t, client, podctx, podID)

containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver)
containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, imageWindowsNanoserver, nil)
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()

Expand All @@ -327,3 +328,120 @@ func Test_RunContainer_HostVolumes_JobContainer_WCOW(t *testing.T) {
t.Fatalf("expected volumes to be the same within job process container. got %q but expected %q", hostStdout, containerStdout)
}
}

// Test_RunContainer_JobContainer_VolumeMount verifies volume mounts are visible
// inside a job (host process) container for three destination shapes: with a
// drive letter, without a drive letter, and a single-file mount.
func Test_RunContainer_JobContainer_VolumeMount(t *testing.T) {
	client := newTestRuntimeClient(t)

	// Host directory (and a file inside it) that will be mounted into the container.
	dir, err := ioutil.TempDir("", "example")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	tmpfn := filepath.Join(dir, "tmpfile")
	if err := ioutil.WriteFile(tmpfn, []byte("test"), 0777); err != nil {
		t.Fatal(err)
	}

	mountDriveLetter := []*runtime.Mount{
		{
			HostPath:      dir,
			ContainerPath: "C:\\path\\in\\container",
		},
	}

	mountNoDriveLetter := []*runtime.Mount{
		{
			HostPath:      dir,
			ContainerPath: "/path/in/container",
		},
	}

	mountSingleFile := []*runtime.Mount{
		{
			HostPath:      tmpfn,
			ContainerPath: "/path/in/container/testfile",
		},
	}

	type config struct {
		name             string
		containerName    string
		requiredFeatures []string
		runtimeHandler   string
		sandboxImage     string
		containerImage   string
		cmd              []string
		exec             []string
		mounts           []*runtime.Mount
	}

	tests := []config{
		{
			name:             "JobContainer_VolumeMount_DriveLetter",
			containerName:    t.Name() + "-Container-DriveLetter",
			requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
			runtimeHandler:   wcowProcessRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
			containerImage:   imageWindowsNanoserver,
			cmd:              []string{"ping", "-t", "127.0.0.1"},
			mounts:           mountDriveLetter,
			exec:             []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"},
		},
		{
			name:             "JobContainer_VolumeMount_NoDriveLetter",
			containerName:    t.Name() + "-Container-NoDriveLetter",
			requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
			runtimeHandler:   wcowProcessRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
			containerImage:   imageWindowsNanoserver,
			cmd:              []string{"ping", "-t", "127.0.0.1"},
			mounts:           mountNoDriveLetter,
			exec:             []string{"cmd", "/c", "dir", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\tmpfile"},
		},
		{
			name:             "JobContainer_VolumeMount_SingleFile",
			containerName:    t.Name() + "-Container-SingleFile",
			requiredFeatures: []string{featureWCOWProcess, featureHostProcess},
			runtimeHandler:   wcowProcessRuntimeHandler,
			sandboxImage:     imageWindowsNanoserver,
			containerImage:   imageWindowsNanoserver,
			cmd:              []string{"ping", "-t", "127.0.0.1"},
			mounts:           mountSingleFile,
			exec:             []string{"cmd", "/c", "type", "%CONTAINER_SANDBOX_MOUNT_POINT%\\path\\in\\container\\testfile"},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requireFeatures(t, test.requiredFeatures...)

			requiredImages := []string{test.sandboxImage, test.containerImage}
			pullRequiredImages(t, requiredImages)

			// NOTE(review): test.runtimeHandler is populated but never consumed —
			// getJobContainerPodRequestWCOW picks its own handler. Confirm intent.
			podctx := context.Background()
			sandboxRequest := getJobContainerPodRequestWCOW(t)

			podID := runPodSandbox(t, client, podctx, sandboxRequest)
			defer removePodSandbox(t, client, podctx, podID)
			defer stopPodSandbox(t, client, podctx, podID)

			// Use the per-case container image rather than a hardcoded one so the
			// table's containerImage field actually takes effect.
			containerRequest := getJobContainerRequestWCOW(t, podID, sandboxRequest.Config, test.containerImage, test.mounts)
			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
			defer cancel()

			containerID := createContainer(t, client, ctx, containerRequest)
			defer removeContainer(t, client, ctx, containerID)
			startContainer(t, client, ctx, containerID)
			defer stopContainer(t, client, ctx, containerID)

			r := execSync(t, client, ctx, &runtime.ExecSyncRequest{
				ContainerId: containerID,
				Cmd:         test.exec,
			})
			if r.ExitCode != 0 {
				t.Fatalf("failed with exit code %d checking for job container mount: %s", r.ExitCode, string(r.Stderr))
			}
		})
	}
}

0 comments on commit 40d9010

Please sign in to comment.