diff --git a/.gitignore b/.gitignore
index 292630f9ea..8c32a3a6ec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@ service/pkg/
 *.img
 *.vhd
 *.tar.gz
+*.tar
 
 # Make stuff
 .rootfs-done
@@ -32,9 +33,16 @@ rootfs/*
 rootfs-conv/*
 *.o
 /build/
-
 deps/*
 out/*
 
+# test results
+test/results
+
+# ninja build
+.ninja_log
+build.ninja
+
+# go workspaces
 go.work
-go.work.sum
\ No newline at end of file
+go.work.sum
diff --git a/test/gcs/container_bench_test.go b/test/gcs/container_bench_test.go
new file mode 100644
index 0000000000..5f481c190c
--- /dev/null
+++ b/test/gcs/container_bench_test.go
@@ -0,0 +1,222 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"testing"
+
+	"github.com/Microsoft/hcsshim/internal/guest/prot"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime/hcsv2"
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/oci"
+	cri_util "github.com/containerd/containerd/pkg/cri/util"
+
+	testoci "github.com/Microsoft/hcsshim/test/internal/oci"
+)
+
+func BenchmarkContainerCreate(b *testing.B) {
+	requireFeatures(b, featureStandalone)
+	ctx := namespaces.WithNamespace(context.Background(), testoci.DefaultNamespace)
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id := b.Name() + cri_util.GenerateID()
+		scratch, rootfs := mountRootfs(ctx, b, host, id)
+
+		s := testoci.CreateLinuxSpec(ctx, b, id,
+			testoci.DefaultLinuxSpecOpts(id,
+				oci.WithRootFSPath(rootfs),
+				oci.WithProcessArgs("/bin/sh", "-c", tailNull),
+			)...,
+		)
+		r := &prot.VMHostedContainerSettingsV2{
+			OCIBundlePath:    scratch,
+			OCISpecification: s,
+		}
+
+		b.StartTimer()
+		c := createContainer(ctx, b, host, id, r)
+		b.StopTimer()
+
+		// create launches background go-routines
+		// so kill container to end those and avoid future perf hits
+		killContainer(ctx, b, c)
+		deleteContainer(ctx, b, c)
+		removeContainer(ctx, b, host, id)
+		unmountRootfs(ctx, b, scratch)
+	}
+}
+
+func BenchmarkContainerStart(b *testing.B) {
+	requireFeatures(b, featureStandalone)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id, r, cleanup := _standaloneContainerRequest(ctx, b, host)
+
+		c := createContainer(ctx, b, host, id, r)
+
+		b.StartTimer()
+		p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+		b.StopTimer()
+
+		killContainer(ctx, b, c)
+		waitContainer(ctx, b, c, p, true)
+		cleanupContainer(ctx, b, host, c)
+		cleanup()
+	}
+}
+
+func BenchmarkContainerKill(b *testing.B) {
+	requireFeatures(b, featureStandalone)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id, r, cleanup := _standaloneContainerRequest(ctx, b, host)
+		c := createContainer(ctx, b, host, id, r)
+		p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+
+		b.StartTimer()
+		killContainer(ctx, b, c)
+		_, n := waitContainerRaw(c, p)
+		b.StopTimer()
+
+		switch n {
+		case prot.NtForcedExit:
+		default:
+			b.Fatalf("container exit was %s", n)
+		}
+
+		cleanupContainer(ctx, b, host, c)
+		cleanup()
+	}
+}
+
+// container create through till wait and exit
+func BenchmarkContainerCompleteExit(b *testing.B) {
+	requireFeatures(b, featureStandalone)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
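+		// swap the default tail -f /dev/null for a command that exits immediately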
"true")) + + b.StartTimer() + c := createContainer(ctx, b, host, id, r) + p := startContainer(ctx, b, c, stdio.ConnectionSettings{}) + e, n := waitContainerRaw(c, p) + b.StopTimer() + + switch n { + case prot.NtGracefulExit, prot.NtUnexpectedExit: + default: + b.Fatalf("container exit was %s", n) + } + + if e != 0 { + b.Fatalf("container exit code was %d", e) + } + + killContainer(ctx, b, c) + c.Wait() + cleanupContainer(ctx, b, host, c) + cleanup() + } +} + +func BenchmarkContainerCompleteKill(b *testing.B) { + requireFeatures(b, featureStandalone) + ctx := context.Background() + host, _ := getTestState(ctx, b) + + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + id, r, cleanup := _standaloneContainerRequest(ctx, b, host) + + b.StartTimer() + c := createContainer(ctx, b, host, id, r) + p := startContainer(ctx, b, c, stdio.ConnectionSettings{}) + killContainer(ctx, b, c) + _, n := waitContainerRaw(c, p) + b.StopTimer() + + switch n { + case prot.NtForcedExit: + default: + b.Fatalf("container exit was %s", n) + } + + cleanupContainer(ctx, b, host, c) + cleanup() + } +} + +func BenchmarkContainerExec(b *testing.B) { + requireFeatures(b, featureStandalone) + ctx := namespaces.WithNamespace(context.Background(), testoci.DefaultNamespace) + host, _ := getTestState(ctx, b) + + id := b.Name() + c := createStandaloneContainer(ctx, b, host, id) + ip := startContainer(ctx, b, c, stdio.ConnectionSettings{}) + + b.StopTimer() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ps := testoci.CreateLinuxSpec(ctx, b, id, + // oci.WithTTY, + oci.WithDefaultPathEnv, + oci.WithProcessArgs("/bin/sh", "-c", "true"), + ).Process + + b.StartTimer() + p := execProcess(ctx, b, c, ps, stdio.ConnectionSettings{}) + exch, dch := p.Wait() + if e := <-exch; e != 0 { + b.Errorf("process exited with error code %d", e) + } + b.StopTimer() + + dch <- true + close(dch) + } + + killContainer(ctx, b, c) + waitContainer(ctx, b, c, ip, true) + cleanupContainer(ctx, b, host, c) +} + +func _standaloneContainerRequest(ctx context.Context, t testing.TB, host *hcsv2.Host, extra ...oci.SpecOpts) (string, *prot.VMHostedContainerSettingsV2, func()) { + ctx = namespaces.WithNamespace(ctx, testoci.DefaultNamespace) + id := t.Name() + cri_util.GenerateID() + scratch, rootfs := mountRootfs(ctx, t, host, id) + + opts := testoci.DefaultLinuxSpecOpts(id, + oci.WithRootFSPath(rootfs), + oci.WithProcessArgs("/bin/sh", "-c", tailNull), + ) + opts = append(opts, extra...) + s := testoci.CreateLinuxSpec(ctx, t, id, opts...) 
+	r := &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    scratch,
+		OCISpecification: s,
+	}
+	f := func() {
+		unmountRootfs(ctx, t, scratch)
+	}
+
+	return id, r, f
+}
diff --git a/test/gcs/container_test.go b/test/gcs/container_test.go
new file mode 100644
index 0000000000..a259abeb7e
--- /dev/null
+++ b/test/gcs/container_test.go
@@ -0,0 +1,240 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/oci"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/Microsoft/hcsshim/internal/guest/gcserr"
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+
+	testoci "github.com/Microsoft/hcsshim/test/internal/oci"
+)
+
+//
+// tests for operations on standalone containers
+//
+
+// todo: using `oci.WithTTY` for IO tests is broken and hangs
+
+func TestContainerCreate(t *testing.T) {
+	requireFeatures(t, featureStandalone)
+
+	ctx := context.Background()
+	host, rtime := getTestState(ctx, t)
+	assertNumberContainers(ctx, t, rtime, 0)
+
+	id := t.Name()
+	c := createStandaloneContainer(ctx, t, host, id)
+	t.Cleanup(func() {
+		cleanupContainer(ctx, t, host, c)
+	})
+
+	p := startContainer(ctx, t, c, stdio.ConnectionSettings{})
+	t.Cleanup(func() {
+		killContainer(ctx, t, c)
+		waitContainer(ctx, t, c, p, true)
+	})
+
+	assertNumberContainers(ctx, t, rtime, 1)
+	css := listContainerStates(ctx, t, rtime)
+	cs := css[0]
+	if cs.ID != id {
+		t.Fatalf("got id %q, wanted %q", cs.ID, id)
+	}
+	pid := p.Pid()
+	if pid != cs.Pid {
+		t.Fatalf("got pid %d, wanted %d", pid, cs.Pid)
+	}
+	if cs.Status != "running" {
+		t.Fatalf("got status %q, wanted %q", cs.Status, "running")
+	}
+}
+
+func TestContainerDelete(t *testing.T) {
+	requireFeatures(t, featureStandalone)
+
+	ctx := context.Background()
+	host, rtime := getTestState(ctx, t)
+	assertNumberContainers(ctx, t, rtime, 0)
+
+	id := t.Name()
+
+	c := createStandaloneContainer(ctx, t, host, id,
+		oci.WithProcessArgs("/bin/sh", "-c", "true"),
+	)
+
+	p := startContainer(ctx, t, c, stdio.ConnectionSettings{})
+	waitContainer(ctx, t, c, p, false)
+
+	cleanupContainer(ctx, t, host, c)
+
+	// getContainer will Fatal
+	_, err := host.GetContainer(id)
+	if hr, herr := gcserr.GetHresult(err); herr != nil || hr != gcserr.HrVmcomputeSystemNotFound {
+		t.Fatalf("GetContainer returned %v, wanted %v", err, gcserr.HrVmcomputeSystemNotFound)
+	}
+	assertNumberContainers(ctx, t, rtime, 0)
+}
+
+//
+// IO
+//
+
+var ioTests = []struct {
+	name string
+	args []string
+	in   string
+	want string
+}{
+	{
+		name: "true",
+		args: []string{"/bin/sh", "-c", "true"},
+		want: "",
+	},
+	{
+		name: "echo",
+		args: []string{"/bin/sh", "-c", `echo -n "hi y'all"`},
+		want: "hi y'all",
+	},
+	{
+		name: "tee",
+		args: []string{"/bin/sh", "-c", "tee"},
+		in:   "are you copying me?",
+		want: "are you copying me?",
+	},
+}
+
+func TestContainerIO(t *testing.T) {
+	requireFeatures(t, featureStandalone)
+
+	ctx := context.Background()
+	host, rtime := getTestState(ctx, t)
+	assertNumberContainers(ctx, t, rtime, 0)
+
+	for _, tt := range ioTests {
+		t.Run(tt.name, func(t *testing.T) {
+			id := strings.ReplaceAll(t.Name(), "/", "")
+
+			con := newConnectionSettings(tt.in != "", true, true)
+			f := createStdIO(ctx, t, con)
+
+			var outStr, errStr string
+			g := &errgroup.Group{}
+			g.Go(func() error {
+				outStr = f.ReadAllOut(ctx, t)
+
+				return nil
+			})
+			g.Go(func() error {
+				errStr = f.ReadAllErr(ctx, t)
+
+				return nil
+			})
+
+			c := createStandaloneContainer(ctx, t, host, id,
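+				// TTY intentionally left off; see the todo above about oci.WithTTY hanging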
+				// oci.WithTTY,
+				oci.WithProcessArgs(tt.args...),
+			)
+			t.Cleanup(func() {
+				cleanupContainer(ctx, t, host, c)
+			})
+			p := startContainer(ctx, t, c, con)
+
+			f.WriteIn(ctx, t, tt.in)
+			f.CloseIn(ctx, t)
+			t.Logf("wrote to stdin: %q", tt.in)
+
+			waitContainer(ctx, t, c, p, false)
+
+			g.Wait()
+			t.Logf("stdout: %q", outStr)
+			t.Logf("stderr: %q", errStr)
+
+			if errStr != "" {
+				t.Fatalf("container returned error %q", errStr)
+			}
+			if outStr != tt.want {
+				t.Fatalf("container returned %q; wanted %q", outStr, tt.want)
+			}
+		})
+	}
+
+	assertNumberContainers(ctx, t, rtime, 0)
+}
+
+func TestContainerExec(t *testing.T) {
+	requireFeatures(t, featureStandalone)
+
+	ctx := namespaces.WithNamespace(context.Background(), testoci.DefaultNamespace)
+	host, rtime := getTestState(ctx, t)
+	assertNumberContainers(ctx, t, rtime, 0)
+
+	id := t.Name()
+	c := createStandaloneContainer(ctx, t, host, id)
+	t.Cleanup(func() {
+		cleanupContainer(ctx, t, host, c)
+	})
+
+	ip := startContainer(ctx, t, c, stdio.ConnectionSettings{})
+	t.Cleanup(func() {
+		killContainer(ctx, t, c)
+		waitContainer(ctx, t, c, ip, true)
+	})
+
+	for _, tt := range ioTests {
+		t.Run(tt.name, func(t *testing.T) {
+			ps := testoci.CreateLinuxSpec(ctx, t, id,
+				// oci.WithTTY,
+				oci.WithDefaultPathEnv,
+				oci.WithProcessArgs(tt.args...),
+			).Process
+			con := newConnectionSettings(tt.in != "", true, true)
+			f := createStdIO(ctx, t, con)
+			// t.Logf("got channels %+v", f)
+
+			var outStr, errStr string
+			g := &errgroup.Group{}
+			g.Go(func() error {
+				outStr = f.ReadAllOut(ctx, t)
+
+				return nil
+			})
+			g.Go(func() error {
+				errStr = f.ReadAllErr(ctx, t)
+
+				return nil
+			})
+
+			// OS pipes can lose some data, so sleep a bit to let ReadAll* kick off
+			time.Sleep(10 * time.Millisecond)
+
+			p := execProcess(ctx, t, c, ps, con)
+			f.WriteIn(ctx, t, tt.in)
+			f.CloseIn(ctx, t)
+			t.Logf("wrote to stdin: %q", tt.in)
+
+			exch, _ := p.Wait()
+			if i := <-exch; i != 0 {
+				t.Errorf("process exited with error code %d", i)
+			}
+
+			g.Wait()
+			t.Logf("stdout: %q", outStr)
+			t.Logf("stderr: %q", errStr)
+
+			if errStr != "" {
+				t.Fatalf("exec returned error %q", errStr)
+			} else if outStr != tt.want {
+				t.Fatalf("process returned %q; wanted %q", outStr, tt.want)
+			}
+		})
+	}
+}
diff --git a/test/gcs/cri_bench_test.go b/test/gcs/cri_bench_test.go
new file mode 100644
index 0000000000..da4e7bd726
--- /dev/null
+++ b/test/gcs/cri_bench_test.go
@@ -0,0 +1,235 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"testing"
+
+	cri_util "github.com/containerd/containerd/pkg/cri/util"
+
+	"github.com/Microsoft/hcsshim/internal/guest/prot"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime/hcsv2"
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+)
+
+func BenchmarkCRISandboxCreate(b *testing.B) {
+	requireFeatures(b, featureCRI)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id := cri_util.GenerateID()
+		scratch, rootfs := mountRootfs(ctx, b, host, id)
+		nns := id
+		createNamespace(ctx, b, nns)
+		spec := sandboxSpec(ctx, b, "test-bench-sandbox", id, nns, rootfs)
+		r := &prot.VMHostedContainerSettingsV2{
+			OCIBundlePath:    scratch,
+			OCISpecification: spec,
+		}
+
+		b.StartTimer()
+		c := createContainer(ctx, b, host, id, r)
+		b.StopTimer()
+
+		// create launches background go-routines
+		// so kill container to end those and avoid future perf hits
+		killContainer(ctx, b, c)
+		cleanupContainer(ctx, b, host, c)
+		unmountRootfs(ctx, b, scratch)
+		removeNamespace(ctx, b, nns)
+	}
+}
+
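+// BenchmarkCRISandboxStart times only the Start call; sandbox creation and teardown happen outside the timed section.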
+func BenchmarkCRISandboxStart(b *testing.B) {
+	requireFeatures(b, featureCRI)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id := cri_util.GenerateID()
+		scratch, rootfs := mountRootfs(ctx, b, host, id)
+		nns := id
+		createNamespace(ctx, b, nns)
+		spec := sandboxSpec(ctx, b, "test-bench-sandbox", id, nns, rootfs)
+		r := &prot.VMHostedContainerSettingsV2{
+			OCIBundlePath:    scratch,
+			OCISpecification: spec,
+		}
+
+		c := createContainer(ctx, b, host, id, r)
+
+		b.StartTimer()
+		p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+		b.StopTimer()
+
+		killContainer(ctx, b, c)
+		waitContainer(ctx, b, c, p, true)
+		cleanupContainer(ctx, b, host, c)
+		unmountRootfs(ctx, b, scratch)
+		removeNamespace(ctx, b, nns)
+	}
+}
+
+func BenchmarkCRISandboxKill(b *testing.B) {
+	requireFeatures(b, featureCRI)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	b.StopTimer()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		id := cri_util.GenerateID()
+		scratch, rootfs := mountRootfs(ctx, b, host, id)
+		nns := id
+		createNamespace(ctx, b, nns)
+		spec := sandboxSpec(ctx, b, "test-bench-sandbox", id, nns, rootfs)
+		r := &prot.VMHostedContainerSettingsV2{
+			OCIBundlePath:    scratch,
+			OCISpecification: spec,
+		}
+
+		c := createContainer(ctx, b, host, id, r)
+		p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+
+		b.StartTimer()
+		killContainer(ctx, b, c)
+		_, n := waitContainerRaw(c, p)
+		b.StopTimer()
+
+		switch n {
+		case prot.NtForcedExit:
+		default:
+			b.Fatalf("container exit was %s", n)
+		}
+
+		cleanupContainer(ctx, b, host, c)
+		removeNamespace(ctx, b, nns)
+		unmountRootfs(ctx, b, scratch)
+	}
+}
+
+func BenchmarkCRIWorkload(b *testing.B) {
+	requireFeatures(b, featureCRI)
+	ctx := context.Background()
+	host, _ := getTestState(ctx, b)
+
+	sid := b.Name()
+	sScratch, sRootfs := mountRootfs(ctx, b, host, sid)
+	b.Cleanup(func() {
+		unmountRootfs(ctx, b, sScratch)
+	})
+	nns := sid
+	createNamespace(ctx, b, nns)
+	b.Cleanup(func() {
+		removeNamespace(ctx, b, nns)
+	})
+
+	sSpec := sandboxSpec(ctx, b, "test-bench-sandbox", sid, nns, sRootfs)
+	sandbox := createContainer(ctx, b, host, sid, &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    sScratch,
+		OCISpecification: sSpec,
+	})
+	b.Cleanup(func() {
+		cleanupContainer(ctx, b, host, sandbox)
+	})
+
+	sandboxInit := startContainer(ctx, b, sandbox, stdio.ConnectionSettings{})
+	b.Cleanup(func() {
+		killContainer(ctx, b, sandbox)
+		waitContainer(ctx, b, sandbox, sandboxInit, true)
+	})
+
+	b.Run("Create", func(b *testing.B) {
+		b.StopTimer()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			id, r, cleanup := _workloadContainerRequest(ctx, b, host, sid, uint32(sandboxInit.Pid()), nns)
+
+			b.StartTimer()
+			c := createContainer(ctx, b, host, id, r)
+			b.StopTimer()
+
+			// create launches background go-routines
+			// so kill container to end those and avoid future perf hits
+			killContainer(ctx, b, c)
+			// edge case where workload container transitions from "created" to "paused"
+			// then "stopped"
+			waitContainerRaw(c, c.InitProcess())
+			cleanupContainer(ctx, b, host, c)
+			cleanup()
+		}
+	})
+
+	b.Run("Start", func(b *testing.B) {
+		b.StopTimer()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			id, r, cleanup := _workloadContainerRequest(ctx, b, host, sid, uint32(sandboxInit.Pid()), nns)
+			c := createContainer(ctx, b, host, id, r)
+
+			b.StartTimer()
+			p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+			b.StopTimer()
+
+			killContainer(ctx, b, c)
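+			// wait for the forced exit so the init process is reaped before cleanup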
+			waitContainer(ctx, b, c, p, true)
+			cleanupContainer(ctx, b, host, c)
+			cleanup()
+		}
+	})
+
+	b.Run("Kill", func(b *testing.B) {
+		b.StopTimer()
+		b.ResetTimer()
+		for i := 0; i < b.N; i++ {
+			id, r, cleanup := _workloadContainerRequest(ctx, b, host, sid, uint32(sandboxInit.Pid()), nns)
+			c := createContainer(ctx, b, host, id, r)
+			p := startContainer(ctx, b, c, stdio.ConnectionSettings{})
+
+			b.StartTimer()
+			killContainer(ctx, b, c)
+			_, n := waitContainerRaw(c, p)
+			b.StopTimer()
+
+			switch n {
+			case prot.NtForcedExit:
+			default:
+				b.Fatalf("container exit was %q, expected %q", n, prot.NtForcedExit)
+			}
+
+			cleanupContainer(ctx, b, host, c)
+			cleanup()
+		}
+	})
+}
+
+func _workloadContainerRequest(ctx context.Context, t testing.TB, host *hcsv2.Host, sid string, spid uint32, nns string) (string, *prot.VMHostedContainerSettingsV2, func()) {
+	id := sid + cri_util.GenerateID()
+	scratch, rootfs := mountRootfs(ctx, t, host, id)
+	spec := containerSpec(ctx, t,
+		sid,
+		spid,
+		"test-bench-container",
+		id,
+		[]string{"/bin/sh", "-c"},
+		[]string{tailNull},
+		"/",
+		nns,
+		rootfs,
+	)
+	r := &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    scratch,
+		OCISpecification: spec,
+	}
+	f := func() {
+		unmountRootfs(ctx, t, scratch)
+	}
+
+	return id, r, f
+}
diff --git a/test/gcs/cri_test.go b/test/gcs/cri_test.go
new file mode 100644
index 0000000000..bb9d0f3533
--- /dev/null
+++ b/test/gcs/cri_test.go
@@ -0,0 +1,95 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"testing"
+
+	"github.com/Microsoft/hcsshim/internal/guest/prot"
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+)
+
+//
+// tests for operations on sandbox and workload (CRI) containers
+//
+
+func TestCRILifecycle(t *testing.T) {
+	requireFeatures(t, featureCRI)
+
+	ctx := context.Background()
+	host, rtime := getTestState(ctx, t)
+	assertNumberContainers(ctx, t, rtime, 0)
+
+	sid := t.Name()
+	scratch, rootfs := mountRootfs(ctx, t, host, sid)
+	t.Cleanup(func() {
+		unmountRootfs(ctx, t, scratch)
+	})
+	createNamespace(ctx, t, sid)
+	t.Cleanup(func() {
+		removeNamespace(ctx, t, sid)
+	})
+
+	spec := sandboxSpec(ctx, t, "test-sandbox", sid, sid, rootfs)
+	sandbox := createContainer(ctx, t, host, sid, &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    scratch,
+		OCISpecification: spec,
+	})
+	t.Cleanup(func() {
+		cleanupContainer(ctx, t, host, sandbox)
+		assertNumberContainers(ctx, t, rtime, 0)
+	})
+
+	assertNumberContainers(ctx, t, rtime, 1)
+	assertContainerState(ctx, t, rtime, sid, "created")
+
+	sandboxInit := startContainer(ctx, t, sandbox, stdio.ConnectionSettings{})
+	t.Cleanup(func() {
+		killContainer(ctx, t, sandbox)
+		waitContainer(ctx, t, sandbox, sandboxInit, true)
+	})
+
+	assertContainerState(ctx, t, rtime, sid, "running")
+	cs := getContainerState(ctx, t, rtime, sid)
+	pid := sandboxInit.Pid()
+	if pid != cs.Pid {
+		t.Fatalf("got sandbox pid %d, wanted %d", pid, cs.Pid)
+	}
+
+	cid := "container" + sid
+	cscratch, crootfs := mountRootfs(ctx, t, host, cid)
+	t.Cleanup(func() {
+		unmountRootfs(ctx, t, cscratch)
+	})
+
+	cspec := containerSpec(ctx, t, sid, uint32(sandboxInit.Pid()), "test-container", cid,
+		[]string{"/bin/sh", "-c"},
+		[]string{tailNull},
+		"/", sid, crootfs,
+	)
+	workload := createContainer(ctx, t, host, cid, &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    cscratch,
+		OCISpecification: cspec,
+	})
+	t.Cleanup(func() {
+		cleanupContainer(ctx, t, host, workload)
+		assertNumberContainers(ctx, t, rtime, 1)
+	})
+
+	assertNumberContainers(ctx, t, rtime, 2)
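+	// both the sandbox and the workload container should now be visible to the runtime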
+	assertContainerState(ctx, t, rtime, cid, "created")
+
+	workloadInit := startContainer(ctx, t, workload, stdio.ConnectionSettings{})
+	assertContainerState(ctx, t, rtime, cid, "running")
+	t.Cleanup(func() {
+		killContainer(ctx, t, workload)
+		waitContainer(ctx, t, workload, workloadInit, true)
+	})
+
+	cs = getContainerState(ctx, t, rtime, cid)
+	pid = workloadInit.Pid()
+	if pid != cs.Pid {
+		t.Fatalf("got workload pid %d, wanted %d", pid, cs.Pid)
+	}
+}
diff --git a/test/gcs/doc.go b/test/gcs/doc.go
new file mode 100644
index 0000000000..19d05fcc86
--- /dev/null
+++ b/test/gcs/doc.go
@@ -0,0 +1,3 @@
+// This package builds a test binary that can be run directly on the uVM guest,
+// alongside ./cmd/gcs, for testing and benchmarking.
+package gcs
diff --git a/test/gcs/helper_conn_test.go b/test/gcs/helper_conn_test.go
new file mode 100644
index 0000000000..97f2cce4b2
--- /dev/null
+++ b/test/gcs/helper_conn_test.go
@@ -0,0 +1,333 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+	"github.com/Microsoft/hcsshim/internal/guest/transport"
+)
+
+const (
+	_network     = "unix"
+	_sockPathFmt = "/tmp/gcs.%d"
+
+	_dialRetries = 4
+	_dialWait    = 50 * time.Millisecond
+)
+
+// port numbers to assign to connections
+var (
+	_pipes      sync.Map
+	_portNumber uint32 = 1
+)
+
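+// PipeTransport implements transport.Transport over in-memory pipe pairs keyed
+// by port number, standing in for the vsock connections a real GCS dials for
+// process I/O.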
+type PipeTransport struct{}
+
+var _ transport.Transport = &PipeTransport{}
+
+func (t *PipeTransport) Dial(port uint32) (c transport.Connection, err error) {
+	for i := 0; i < _dialRetries; i++ {
+		c, err = getFakeSocket(port)
+
+		switch {
+		case errors.Is(err, unix.ENOENT): // socket hasn't been created
+			time.Sleep(_dialWait)
+			continue
+		}
+		break
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("dialed port %d", port)
+	return c, nil
+}
+
+type fakeIO struct {
+	stdin, stdout, stderr *fakeSocket
+}
+
+func createStdIO(ctx context.Context, t testing.TB, con stdio.ConnectionSettings) *fakeIO {
+	// (stdin io.WriteCloser, stdout io.ReadCloser, stderr io.ReadCloser) {
+	f := &fakeIO{}
+	if con.StdIn != nil {
+		f.stdin = newFakeSocket(ctx, t, *con.StdIn, "stdin")
+	}
+	if con.StdOut != nil {
+		f.stdout = newFakeSocket(ctx, t, *con.StdOut, "stdout")
+	}
+	if con.StdErr != nil {
+		f.stderr = newFakeSocket(ctx, t, *con.StdErr, "stderr")
+	}
+
+	return f
+}
+
+func (f *fakeIO) WriteIn(_ context.Context, t testing.TB, s string) {
+	if f.stdin == nil {
+		return
+	}
+
+	b := []byte(s)
+	n := len(b)
+
+	nn, err := f.stdin.Write(b)
+	if err != nil {
+		t.Helper()
+		t.Errorf("write to stdin: %v", err)
+	}
+	if n != nn {
+		t.Helper()
+		t.Errorf("only wrote %d bytes, expected %d", nn, n)
+	}
+}
+
+func (f *fakeIO) CloseIn(_ context.Context, t testing.TB) {
+	if f.stdin == nil {
+		return
+	}
+
+	if err := f.stdin.CloseWrite(); err != nil {
+		t.Helper()
+		t.Errorf("close write stdin: %v", err)
+	}
+
+	if err := f.stdin.Close(); err != nil {
+		t.Helper()
+		t.Errorf("close stdin: %v", err)
+	}
+}
+
+func (f *fakeIO) ReadAllOut(ctx context.Context, t testing.TB) string {
+	return f.stdout.readAll(ctx, t)
+}
+
+func (f *fakeIO) ReadAllErr(ctx context.Context, t testing.TB) string {
+	return f.stderr.readAll(ctx, t)
+}
+
+type fakeSocket struct {
+	id uint32
+	n  string
+	ch chan struct{} // closed when dialed (via getFakeSocket)
+	// m sync.RWMutex
+	r, w *os.File
+}
+
+var _ transport.Connection = &fakeSocket{}
+
+func newFakeSocket(_ context.Context, t testing.TB, id uint32, n string) *fakeSocket {
+	t.Helper()
+
+	_, ok := _pipes.Load(id)
+	if ok {
+		t.Fatalf("socket %d already exists", id)
+	}
+
+	r, w, err := os.Pipe()
+	if err != nil {
+		t.Fatalf("could not create socket: %v", err)
+	}
+
+	s := &fakeSocket{
+		id: id,
+		n:  n,
+		r:  r,
+		w:  w,
+		ch: make(chan struct{}),
+	}
+	_pipes.Store(id, s)
+
+	return s
+}
+
+func getFakeSocket(id uint32) (*fakeSocket, error) {
+	// logrus.Debugf("getting fake socket %d", id)
+	f, ok := _pipes.Load(id)
+	if !ok {
+		return nil, unix.ENOENT
+	}
+
+	s := f.(*fakeSocket)
+	select {
+	case <-s.ch:
+	default:
+		close(s.ch)
+	}
+
+	return s, nil
+}
+
+func (s *fakeSocket) Read(b []byte) (int, error) {
+	// logrus.Debugf("reading from fake socket %d", s.id)
+	<-s.ch
+	return s.r.Read(b)
+}
+
+func (s *fakeSocket) Write(b []byte) (int, error) {
+	// logrus.Debugf("writing to fake socket %d", s.id)
+	<-s.ch
+	return s.w.Write(b)
+}
+
+func (s *fakeSocket) Close() (err error) {
+	// logrus.Debugf("closing fake socket %d", s.id)
+	if _, ok := _pipes.LoadAndDelete(s.id); ok {
+		return nil
+	}
+
+	err = s.r.Close()
+	if err := s.w.Close(); err != nil {
+		return err
+	}
+
+	return err
+}
+
+func (s *fakeSocket) CloseRead() error {
+	return s.r.Close()
+}
+
+func (s *fakeSocket) CloseWrite() error {
+	return s.w.Close()
+}
+
+func (s *fakeSocket) File() (*os.File, error) {
+	return nil, errors.New("fakeSocket does not support File()")
+}
+
+func (s *fakeSocket) readAll(ctx context.Context, t testing.TB) string {
+	return string(s.readAllByte(ctx, t))
+}
+
+func (s *fakeSocket) readAllByte(ctx context.Context, t testing.TB) (b []byte) {
+	if s == nil {
+		return nil
+	}
+
+	var err error
+	ch := make(chan struct{})
+	go func() {
+		defer close(ch)
+		b, err = io.ReadAll(s)
+	}()
+
+	select {
+	case <-ch:
+		if err != nil {
+			t.Helper()
+			t.Errorf("read all %s: %v", s.n, err)
+		}
+	case <-ctx.Done():
+		t.Helper()
+		t.Errorf("read all %s context cancelled: %v", s.n, ctx.Err())
+	}
+
+	return b
+}
+
+func newConnectionSettings(in, out, err bool) stdio.ConnectionSettings {
+	c := stdio.ConnectionSettings{}
+
+	if in {
+		p := nextPortNumber()
+		c.StdIn = &p
+	}
+	if out {
+		p := nextPortNumber()
+		c.StdOut = &p
+	}
+	if err {
+		p := nextPortNumber()
+		c.StdErr = &p
+	}
+
+	return c
+}
+
+func nextPortNumber() uint32 {
+	return atomic.AddUint32(&_portNumber, 2)
+}
+
+func TestFakeSocket(t *testing.T) {
+	ctx := context.Background()
+	tpt := getTransport()
+
+	ch := make(chan struct{})
+	chs := make(chan struct{})
+	con := newConnectionSettings(true, true, false)
+
+	// host
+	f := createStdIO(ctx, t, con)
+	// t.Logf("got std io %v %v %v", stdin, stdout, stderr)
+
+	var err error
+	go func() { // guest
+		defer close(ch)
+
+		var cin, cout transport.Connection
+		cin, err = tpt.Dial(*con.StdIn)
+		if err != nil {
+			t.Logf("dial error %v", err)
+
+			return
+		}
+		defer cin.Close()
+		// t.Logf("dialed conn %#+v", cin)
+
+		cout, err = tpt.Dial(*con.StdOut)
+		if err != nil {
+			t.Logf("dial error %v", err)
+
+			return
+		}
+		defer cout.Close()
+		// t.Logf("dialed conn %#+v", cout)
+
+		close(chs)
+		var b []byte
+		b, err = io.ReadAll(cin)
+		if err != nil {
+			t.Logf("read all error: %v", err)
+
+			return
+		}
+		t.Logf("guest read %s", b)
+
+		_, err = cout.Write(b)
+		cout.CloseWrite()
+
+		return
+	}()
+
+	<-chs // wait for guest to dial
+	f.WriteIn(ctx, t, "hello")
+	f.WriteIn(ctx, t, " world")
+	f.CloseIn(ctx, t)
+	t.Logf("host wrote")
+
+	<-ch
+	t.Logf("go routine closed")
+	if err != nil {
+		t.Fatalf("go routine error: %v", err)
+	}
+
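+	// the host should read back exactly what the guest echoed from stdin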
+	s := f.ReadAllOut(ctx, t)
+	t.Logf("host read %q", s)
+	if s != "hello world" {
+		t.Fatalf("got %q, wanted %q", s, "hello world")
+	}
+}
diff --git a/test/gcs/helper_container_test.go b/test/gcs/helper_container_test.go
new file mode 100644
index 0000000000..ceff5527a6
--- /dev/null
+++ b/test/gcs/helper_container_test.go
@@ -0,0 +1,290 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	"github.com/containerd/containerd/namespaces"
+	ctrdoci "github.com/containerd/containerd/oci"
+	oci "github.com/opencontainers/runtime-spec/specs-go"
+
+	"github.com/Microsoft/hcsshim/internal/guest/prot"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime/hcsv2"
+	"github.com/Microsoft/hcsshim/internal/guest/stdio"
+	"github.com/Microsoft/hcsshim/internal/guest/storage"
+	"github.com/Microsoft/hcsshim/internal/guest/storage/overlay"
+	"github.com/Microsoft/hcsshim/internal/guestpath"
+
+	testoci "github.com/Microsoft/hcsshim/test/internal/oci"
+)
+
+// todo: autogenerate/fuzz realistic specs
+
+//
+// testing helper functions for generic container management
+//
+
+const tailNull = "tail -f /dev/null"
+
+// will call unmountRootfs during cleanup
+func createStandaloneContainer(ctx context.Context, t testing.TB, host *hcsv2.Host, id string, extra ...ctrdoci.SpecOpts) *hcsv2.Container {
+	ctx = namespaces.WithNamespace(ctx, testoci.DefaultNamespace)
+	scratch, rootfs := mountRootfs(ctx, t, host, id)
+	// spec is passed in from containerd and then updated in internal\hcsoci\create.go:CreateContainer()
+	opts := testoci.DefaultLinuxSpecOpts(id,
+		ctrdoci.WithRootFSPath(rootfs),
+		ctrdoci.WithProcessArgs("/bin/sh", "-c", tailNull),
+	)
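+	// apply caller-supplied opts last so they can override the defaults above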
+	opts = append(opts, extra...)
+	s := testoci.CreateLinuxSpec(ctx, t, id, opts...)
+	r := &prot.VMHostedContainerSettingsV2{
+		OCIBundlePath:    scratch,
+		OCISpecification: s,
+	}
+
+	t.Cleanup(func() {
+		unmountRootfs(ctx, t, scratch)
+		// hcsv2.RemoveNetworkNamespace(ctx, id)
+	})
+
+	return createContainer(ctx, t, host, id, r)
+}
+
+func createContainer(ctx context.Context, t testing.TB, host *hcsv2.Host, id string, s *prot.VMHostedContainerSettingsV2) *hcsv2.Container {
+	c, err := host.CreateContainer(ctx, id, s)
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not create container %q: %v", id, err)
+	}
+
+	return c
+}
+
+func getContainer(_ context.Context, t testing.TB, host *hcsv2.Host, id string) *hcsv2.Container {
+	c, err := host.GetContainer(id)
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not get container %q: %v", id, err)
+	}
+
+	return c
+}
+
+func removeContainer(_ context.Context, _ testing.TB, host *hcsv2.Host, id string) {
+	host.RemoveContainer(id)
+}
+
+func startContainer(ctx context.Context, t testing.TB, c *hcsv2.Container, conn stdio.ConnectionSettings) hcsv2.Process {
+	pid, err := c.Start(ctx, conn)
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not start container %q: %v", c.ID(), err)
+	}
+
+	return getProcess(ctx, t, c, uint32(pid))
+}
+
+// waitContainer waits on the container's init process, p
+func waitContainer(ctx context.Context, t testing.TB, c *hcsv2.Container, p hcsv2.Process, forced bool) {
+	t.Helper()
+
+	var e int
+	ch := make(chan prot.NotificationType)
+
+	// have to read the init process exit code to close the container
+	exch, dch := p.Wait()
+	defer close(dch)
+	go func() {
+		e = <-exch
+		dch <- true
+		ch <- c.Wait()
+		close(ch)
+	}()
+
+	select {
+	case n, ok := <-ch:
+		if !ok {
+			t.Fatalf("container %q did not return a notification", c.ID())
+		}
+
+		switch {
+		// UnexpectedExit is the default, ForcedExit if killed
+		case n == prot.NtGracefulExit:
+		case n == prot.NtUnexpectedExit:
+		case forced && n == prot.NtForcedExit:
+		default:
+			t.Fatalf("container %q exited with %s", c.ID(), n)
+		}
+	case <-ctx.Done():
+		t.Fatalf("context canceled: %v", ctx.Err())
+	}
+
+	switch {
+	case e == 0:
+	case forced && e == 137:
+	default:
+		t.Fatalf("got exit code %d", e)
+	}
+}
+
+func waitContainerRaw(c *hcsv2.Container, p hcsv2.Process) (int, prot.NotificationType) {
+	exch, dch := p.Wait()
+	defer close(dch)
+	r := <-exch
+	dch <- true
+	n := c.Wait()
+
+	return r, n
+}
+
+func execProcess(ctx context.Context, t testing.TB, c *hcsv2.Container, p *oci.Process, con stdio.ConnectionSettings) hcsv2.Process {
+	pid, err := c.ExecProcess(ctx, p, con)
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not exec process: %v", err)
+	}
+
+	return getProcess(ctx, t, c, uint32(pid))
+}
+
+func getProcess(_ context.Context, t testing.TB, c *hcsv2.Container, pid uint32) hcsv2.Process {
+	p, err := c.GetProcess(pid)
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not get process %d: %v", pid, err)
+	}
+
+	return p
+}
+
+func killContainer(ctx context.Context, t testing.TB, c *hcsv2.Container) {
+	if err := c.Kill(ctx, syscall.SIGKILL); err != nil {
+		t.Helper()
+		t.Fatalf("could not kill container %q: %v", c.ID(), err)
+	}
+}
+
+func deleteContainer(ctx context.Context, t testing.TB, c *hcsv2.Container) {
+	if err := c.Delete(ctx); err != nil {
+		t.Helper()
+		t.Fatalf("could not delete container %q: %v", c.ID(), err)
+	}
+}
+
+func cleanupContainer(ctx context.Context, t testing.TB, host *hcsv2.Host, c *hcsv2.Container) {
+	deleteContainer(ctx, t, c)
+	removeContainer(ctx, t, host, c.ID())
+}
+
+//
+// runtime
+//
+
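+// listContainerStates reads state from the runtime directly, independent of the host's own container tracking.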
+func listContainerStates(_ context.Context, t testing.TB, rt runtime.Runtime) []runtime.ContainerState {
+	css, err := rt.ListContainerStates()
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not list containers: %v", err)
+	}
+
+	return css
+}
+
+func assertNumberContainers(ctx context.Context, t testing.TB, rt runtime.Runtime, n int) {
+	fmt := "found %d running containers, wanted %d"
+	css := listContainerStates(ctx, t, rt)
+	nn := len(css)
+	if nn != n {
+		t.Helper()
+
+		if nn == 0 {
+			t.Fatalf(fmt, nn, n)
+		}
+
+		cs := make([]string, nn)
+		for i, c := range css {
+			cs[i] = c.ID
+		}
+
+		t.Fatalf(fmt+":\n%#+v", nn, n, cs)
+	}
+}
+
+func getContainerState(ctx context.Context, t testing.TB, rt runtime.Runtime, id string) runtime.ContainerState {
+	css := listContainerStates(ctx, t, rt)
+
+	for _, cs := range css {
+		if cs.ID == id {
+			return cs
+		}
+	}
+
+	t.Helper()
+	t.Fatalf("could not find container %q", id)
+	return runtime.ContainerState{} // just to make the linter happy
+}
+
+func assertContainerState(ctx context.Context, t testing.TB, rt runtime.Runtime, id, state string) {
+	cs := getContainerState(ctx, t, rt, id)
+	if cs.Status != state {
+		t.Helper()
+		t.Fatalf("got container %q status %q, wanted %q", id, cs.Status, state)
+	}
+}
+
+//
+// mount management
+//
+
+func mountRootfs(ctx context.Context, t testing.TB, host *hcsv2.Host, id string) (scratch string, rootfs string) {
+	scratch = filepath.Join(guestpath.LCOWRootPrefixInUVM, id)
+	rootfs = filepath.Join(scratch, "rootfs")
+	if err := overlay.MountLayer(ctx,
+		[]string{*flagRootfsPath},
+		filepath.Join(scratch, "upper"),
+		filepath.Join(scratch, "work"),
+		rootfs,
+		false, // readonly
+		id,
+		host.SecurityPolicyEnforcer(),
+	); err != nil {
+		t.Helper()
+		t.Fatalf("could not mount overlay layers from %q: %v", *flagRootfsPath, err)
+	}
+
+	return scratch, rootfs
+}
+
+func unmountRootfs(ctx context.Context, t testing.TB, path string) {
+	if err := storage.UnmountAllInPath(ctx, path, true); err != nil {
+		t.Fatalf("could not unmount container rootfs: %v", err)
+	}
+	if err := os.RemoveAll(path); err != nil {
+		t.Fatalf("could not remove container directory: %v", err)
+	}
+}
+
+//
+// network namespaces
+//
+
+func createNamespace(ctx context.Context, t testing.TB, nns string) {
+	ns := hcsv2.GetOrAddNetworkNamespace(nns)
+	if err := ns.Sync(ctx); err != nil {
+		t.Helper()
+		t.Fatalf("could not sync new namespace %q: %v", nns, err)
+	}
+}
+
+func removeNamespace(ctx context.Context, t testing.TB, nns string) {
+	// if err := hcsv2.RemoveNetworkNamespace(ctx, nns); err != nil {
+	// 	t.Helper()
+	// 	t.Fatalf("could not remove namespace %q: %v", nns, err)
+	// }
+}
diff --git a/test/gcs/helper_cri_test.go b/test/gcs/helper_cri_test.go
new file mode 100644
index 0000000000..cf9dd09147
--- /dev/null
+++ b/test/gcs/helper_cri_test.go
@@ -0,0 +1,134 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"testing"
+
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/oci"
+	"github.com/containerd/containerd/pkg/cri/annotations"
+	criopts "github.com/containerd/containerd/pkg/cri/opts"
+
+	testoci "github.com/Microsoft/hcsshim/test/internal/oci"
+)
+
+//
+// testing helper functions for CRI (sandbox and workload) container management
+//
+
+func sandboxSpec(
+	ctx context.Context,
+	t testing.TB,
+	name string,
+	id string,
+	nns string,
+	root string,
+	extra ...oci.SpecOpts,
+) *oci.Spec {
+	ctx = namespaces.WithNamespace(ctx, testoci.CRINamespace)
+	opts := sandboxSpecOpts(ctx, t, name, id, nns, root)
+	opts = append(opts, extra...)
+
+	return testoci.CreateLinuxSpec(ctx, t, id, opts...)
+}
+
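+// sandboxSpecOpts approximates the spec options containerd's CRI plugin sets on a pause sandbox container.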
+func sandboxSpecOpts(_ context.Context, t testing.TB,
+	name string,
+	id string,
+	nns string,
+	root string,
+) []oci.SpecOpts {
+	img := testoci.LinuxSandboxImageConfig(*flagSandboxPause)
+	cfg := testoci.LinuxSandboxRuntimeConfig(name)
+
+	opts := testoci.DefaultLinuxSpecOpts(nns,
+		oci.WithEnv(img.Env),
+		oci.WithHostname(cfg.GetHostname()),
+		oci.WithRootFSPath(root),
+	)
+
+	if usr := img.User; usr != "" {
+		opts = append(opts, oci.WithUser(usr))
+	}
+
+	if img.WorkingDir != "" {
+		opts = append(opts, oci.WithProcessCwd(img.WorkingDir))
+	}
+
+	if len(img.Entrypoint) == 0 && len(img.Cmd) == 0 {
+		t.Helper()
+		t.Fatalf("invalid empty entrypoint and cmd in image config %+v", img)
+	}
+	opts = append(opts, oci.WithProcessArgs(append(img.Entrypoint, img.Cmd...)...))
+
+	opts = append(opts,
+		criopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox),
+		criopts.WithAnnotation(annotations.SandboxID, id),
+		criopts.WithAnnotation(annotations.SandboxNamespace, cfg.GetMetadata().GetNamespace()),
+		criopts.WithAnnotation(annotations.SandboxName, cfg.GetMetadata().GetName()),
+		criopts.WithAnnotation(annotations.SandboxLogDir, cfg.GetLogDirectory()),
+	)
+
+	return opts
+}
+
+func containerSpec(
+	ctx context.Context,
+	t testing.TB,
+	sandboxID string,
+	sandboxPID uint32,
+	name string,
+	id string,
+	cmd []string,
+	args []string,
+	wd string,
+	nns string,
+	root string,
+	extra ...oci.SpecOpts,
+) *oci.Spec {
+	ctx = namespaces.WithNamespace(ctx, testoci.CRINamespace)
+	opts := containerSpecOpts(ctx, t, sandboxID, sandboxPID, name, cmd, args, wd, nns, root)
+	opts = append(opts, extra...)
+
+	return testoci.CreateLinuxSpec(ctx, t, id, opts...)
+}
+
+func containerSpecOpts(_ context.Context, _ testing.TB,
+	sandboxID string,
+	sandboxPID uint32,
+	name string,
+	cmd []string,
+	args []string,
+	wd string,
+	nns string,
+	root string,
+) []oci.SpecOpts {
+	cfg := testoci.LinuxWorkloadRuntimeConfig(name, cmd, args, wd)
+	img := testoci.LinuxWorkloadImageConfig()
+
+	opts := testoci.DefaultLinuxSpecOpts(nns,
+		oci.WithRootFSPath(root),
+		oci.WithEnv(nil),
+		// this will be set based on the security context below
+		oci.WithNewPrivileges,
+		criopts.WithProcessArgs(cfg, img),
+		criopts.WithPodNamespaces(nil, sandboxPID, sandboxPID),
+	)
+
+	hostname := name
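+	// start from the image config env, then layer on the CRI runtime config env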
+	env := append([]string{testoci.HostnameEnv + "=" + hostname}, img.Env...)
+	for _, e := range cfg.GetEnvs() {
+		env = append(env, e.GetKey()+"="+e.GetValue())
+	}
+	opts = append(opts, oci.WithEnv(env))
+
+	opts = append(opts,
+		criopts.WithAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer),
+		criopts.WithAnnotation(annotations.SandboxID, sandboxID),
+		criopts.WithAnnotation(annotations.ContainerName, name),
+	)
+
+	return opts
+}
diff --git a/test/gcs/main_test.go b/test/gcs/main_test.go
new file mode 100644
index 0000000000..76756ce8d5
--- /dev/null
+++ b/test/gcs/main_test.go
@@ -0,0 +1,177 @@
+//go:build linux
+
+package gcs
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"strconv"
+	"testing"
+
+	"github.com/containerd/cgroups"
+	"github.com/sirupsen/logrus"
+
+	"github.com/Microsoft/hcsshim/internal/guest/runtime"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime/hcsv2"
+	"github.com/Microsoft/hcsshim/internal/guest/runtime/runc"
+	"github.com/Microsoft/hcsshim/internal/guest/transport"
+	"github.com/Microsoft/hcsshim/internal/guestpath"
+	"github.com/Microsoft/hcsshim/pkg/securitypolicy"
+
+	testflag "github.com/Microsoft/hcsshim/test/internal/flag"
+	"github.com/Microsoft/hcsshim/test/internal/require"
+)
+
+const (
+	featureCRI        = "CRI"
+	featureStandalone = "StandAlone"
+)
+
+var allFeatures = []string{
+	featureCRI,
+	featureStandalone,
+}
+
+// flags
+var (
+	flagSecurityPolicy string
+	flagFeatures       = testflag.NewFeatureFlag(allFeatures)
+	flagJoinGCSCgroup  = flag.Bool(
+		"join-gcs-cgroup",
+		false,
+		"If true, join the same cgroup as the gcs daemon, `/gcs`",
+	)
+	flagRootfsPath = flag.String(
+		"rootfs-path",
+		"/run/rootfs",
+		"The path on the uVM of the unpacked rootfs to use for the containers",
+	)
+	flagSandboxPause = flag.Bool(
+		"pause-sandbox",
+		false,
+		"Use `/pause` as the sandbox container command",
+	)
+)
+
+func init() {
+	p := securitypolicy.NewOpenDoorPolicy()
+	pStr, err := p.EncodeToString()
+	if err != nil {
+		// really should not get here ...
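+		// (encoding a fresh open door policy should always succeed)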
+		log.Fatalf("could not encode open door policy to string: %v", err)
+	}
+
+	flag.StringVar(
+		&flagSecurityPolicy,
+		"security-policy",
+		pStr,
+		"The base64-encoded security policy to use during testing",
+	)
+}
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	if err := setup(); err != nil {
+		logrus.WithError(err).Fatal("could not set up testing")
+	}
+
+	os.Exit(m.Run())
+}
+
+func setup() (err error) {
+	os.MkdirAll(guestpath.LCOWRootPrefixInUVM, 0755)
+	// os.MkdirAll(sockDir, 0755)
+
+	if vf := flag.Lookup("test.v"); vf != nil {
+		if vf.Value.String() == strconv.FormatBool(true) {
+			logrus.SetLevel(logrus.DebugLevel)
+		} else {
+			logrus.SetLevel(logrus.ErrorLevel)
+		}
+	}
+
+	// should already start gcs cgroup
+	if !*flagJoinGCSCgroup {
+		gcsControl, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/"))
+		if err != nil {
+			return fmt.Errorf("failed to load root cgroup: %w", err)
+		}
+		if err := gcsControl.Add(cgroups.Process{Pid: os.Getpid()}); err != nil {
+			return fmt.Errorf("failed to join root cgroup: %w", err)
+		}
+		logrus.Debug("joined root cgroup")
+	}
+
+	// initialize runtime
+	rt, err := _getRuntime()
+	if err != nil {
+		return err
+	}
+
+	// check policy will be parsed properly
+	if _, err = _getHost(rt, getTransport()); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+//
+// host and runtime management
+//
+
+func getTestState(ctx context.Context, t testing.TB) (*hcsv2.Host, runtime.Runtime) {
+	rt := getRuntime(ctx, t)
+
+	return getHost(ctx, t, rt), rt
+}
+
+func getHost(_ context.Context, t testing.TB, rt runtime.Runtime) *hcsv2.Host {
+	h, err := _getHost(rt, getTransport())
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not get host: %v", err)
+	}
+
+	return h
+}
+
+func _getHost(rt runtime.Runtime, tp transport.Transport) (*hcsv2.Host, error) {
+	h := hcsv2.NewHost(rt, tp)
+	if err := h.SetSecurityPolicy(flagSecurityPolicy); err != nil {
+		return nil, fmt.Errorf("could not set host security policy: %w", err)
+	}
+
+	return h, nil
+}
+
+func getRuntime(_ context.Context, t testing.TB) runtime.Runtime {
+	rt, err := _getRuntime()
+	if err != nil {
+		t.Helper()
+		t.Fatalf("could not get runtime: %v", err)
+	}
+
+	return rt
+}
+
+func _getRuntime() (runtime.Runtime, error) {
+	rt, err := runc.NewRuntime(guestpath.LCOWRootPrefixInUVM)
+	if err != nil {
+		return rt, fmt.Errorf("failed to initialize runc runtime: %w", err)
+	}
+
+	return rt, nil
+}
+
+func getTransport() transport.Transport {
+	return &PipeTransport{}
+}
+
+func requireFeatures(t testing.TB, features ...string) {
+	require.Features(t, flagFeatures.S, features...)
+}