diff --git a/guest/linux/doc.go b/guest/linux/doc.go new file mode 100644 index 0000000000..9b05ff49a9 --- /dev/null +++ b/guest/linux/doc.go @@ -0,0 +1,2 @@ +// Package linux contains definitions required for making a linux ioctl. +package linux diff --git a/guest/linux/ioctl.go b/guest/linux/ioctl.go index c87a3da9f5..03ad923502 100644 --- a/guest/linux/ioctl.go +++ b/guest/linux/ioctl.go @@ -1,7 +1,6 @@ //go:build linux // +build linux -// Package linux contains definitions required for making a linux ioctl. package linux import ( diff --git a/guest/policy/doc.go b/guest/policy/doc.go new file mode 100644 index 0000000000..8cbf7ee3fc --- /dev/null +++ b/guest/policy/doc.go @@ -0,0 +1 @@ +package policy diff --git a/guest/runtime/hcsv2/container.go b/guest/runtime/hcsv2/container.go index cc4304c2f3..415dc613e1 100644 --- a/guest/runtime/hcsv2/container.go +++ b/guest/runtime/hcsv2/container.go @@ -123,6 +123,11 @@ func (c *Container) ExecProcess(ctx context.Context, process *oci.Process, conSe return pid, nil } +// InitProcess returns the container's init process +func (c *Container) InitProcess() Process { + return c.initProcess +} + // GetProcess returns the Process with the matching 'pid'. If the 'pid' does // not exit returns error. func (c *Container) GetProcess(pid uint32) (Process, error) { @@ -130,7 +135,7 @@ func (c *Container) GetProcess(pid uint32) (Process, error) { logrus.WithFields(logrus.Fields{ logfields.ContainerID: c.id, logfields.ProcessID: pid, - }).Info("opengcs::Container::GetProcesss") + }).Info("opengcs::Container::GetProcess") if c.initProcess.pid == pid { return c.initProcess, nil } @@ -245,3 +250,7 @@ func (c *Container) getStatus() containerStatus { func (c *Container) setStatus(st containerStatus) { atomic.StoreUint32((*uint32)(&c.status), uint32(st)) } + +func (c *Container) ID() string { + return c.id +} diff --git a/guest/runtime/hcsv2/network.go b/guest/runtime/hcsv2/network.go index 9feb7afaed..5edc7ff7a1 100644 --- a/guest/runtime/hcsv2/network.go +++ b/guest/runtime/hcsv2/network.go @@ -51,9 +51,9 @@ func getNetworkNamespace(id string) (*namespace, error) { return ns, nil } -// getOrAddNetworkNamespace returns the namespace found by `id` or creates a new +// GetOrAddNetworkNamespace returns the namespace found by `id` or creates a new // one and assigns `id. -func getOrAddNetworkNamespace(id string) *namespace { +func GetOrAddNetworkNamespace(id string) *namespace { id = strings.ToLower(id) namespaceSync.Lock() @@ -69,8 +69,8 @@ func getOrAddNetworkNamespace(id string) *namespace { return ns } -// removeNetworkNamespace removes the in-memory `namespace` found by `id`. -func removeNetworkNamespace(ctx context.Context, id string) (err error) { +// RemoveNetworkNamespace removes the in-memory `namespace` found by `id`. 
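+// Exporting these two (RemoveNetworkNamespace here and GetOrAddNetworkNamespace
+// above) lets callers outside the package tie namespace lifetime to container
+// lifetime. A minimal sketch of the intended pairing, illustrative only (nsID
+// stands in for the namespace/sandbox id):
+//
+//	ns := GetOrAddNetworkNamespace(nsID) // idempotent: repeat calls return the same namespace
+//	// ... AddAdapter / Sync / run the container ...
+//	if err := RemoveNetworkNamespace(ctx, nsID); err != nil {
+//		// fails while adapters are still attached (see network_test.go below)
+//	}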
+func RemoveNetworkNamespace(ctx context.Context, id string) (err error) { _, span := trace.StartSpan(ctx, "hcsv2::removeNetworkNamespace") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() @@ -123,7 +123,7 @@ func (n *namespace) AssignContainerPid(ctx context.Context, pid int) (err error) defer n.m.Unlock() if n.pid != 0 { - return errors.Errorf("previously assigned container pid: %d", n.pid) + return errors.Errorf("previously assigned container pid %d to network namespace %q", n.pid, n.id) } n.pid = pid diff --git a/guest/runtime/hcsv2/network_test.go b/guest/runtime/hcsv2/network_test.go index 3ac4b9eed1..899bf08a0a 100644 --- a/guest/runtime/hcsv2/network_test.go +++ b/guest/runtime/hcsv2/network_test.go @@ -12,7 +12,7 @@ import ( func Test_getNetworkNamespace_NotExist(t *testing.T) { defer func() { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Errorf("failed to remove ns with error: %v", err) } @@ -29,13 +29,13 @@ func Test_getNetworkNamespace_NotExist(t *testing.T) { func Test_getNetworkNamespace_PreviousExist(t *testing.T) { defer func() { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Errorf("failed to remove ns with error: %v", err) } }() - ns1 := getOrAddNetworkNamespace(t.Name()) + ns1 := GetOrAddNetworkNamespace(t.Name()) if ns1 == nil { t.Fatal("namespace ns1 should not be nil") } @@ -50,13 +50,13 @@ func Test_getNetworkNamespace_PreviousExist(t *testing.T) { func Test_getOrAddNetworkNamespace_NotExist(t *testing.T) { defer func() { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Errorf("failed to remove ns with error: %v", err) } }() - ns := getOrAddNetworkNamespace(t.Name()) + ns := GetOrAddNetworkNamespace(t.Name()) if ns == nil { t.Fatalf("namespace should not be nil") } @@ -64,21 +64,21 @@ func Test_getOrAddNetworkNamespace_NotExist(t *testing.T) { func Test_getOrAddNetworkNamespace_PreviousExist(t *testing.T) { defer func() { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Errorf("failed to remove ns with error: %v", err) } }() - ns1 := getOrAddNetworkNamespace(t.Name()) - ns2 := getOrAddNetworkNamespace(t.Name()) + ns1 := GetOrAddNetworkNamespace(t.Name()) + ns2 := GetOrAddNetworkNamespace(t.Name()) if ns1 != ns2 { t.Fatalf("ns1 %+v != ns2 %+v", ns1, ns2) } } func Test_removeNetworkNamespace_NotExist(t *testing.T) { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Fatalf("failed to remove non-existing ns with error: %v", err) } @@ -86,7 +86,7 @@ func Test_removeNetworkNamespace_NotExist(t *testing.T) { func Test_removeNetworkNamespace_HasAdapters(t *testing.T) { defer func() { - err := removeNetworkNamespace(context.Background(), t.Name()) + err := RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Errorf("failed to remove ns with error: %v", err) } @@ -96,7 +96,7 @@ func Test_removeNetworkNamespace_HasAdapters(t *testing.T) { networkInstanceIDToName = nsOld }() - ns := getOrAddNetworkNamespace(t.Name()) + ns := GetOrAddNetworkNamespace(t.Name()) networkInstanceIDToName = func(ctx context.Context, id string, _ bool) (string, error) { return "/dev/sdz", 
nil @@ -105,7 +105,7 @@ func Test_removeNetworkNamespace_HasAdapters(t *testing.T) { if err != nil { t.Fatalf("failed to add adapter: %v", err) } - err = removeNetworkNamespace(context.Background(), t.Name()) + err = RemoveNetworkNamespace(context.Background(), t.Name()) if err == nil { t.Fatal("should have failed to delete namespace with adapters") } @@ -113,7 +113,7 @@ func Test_removeNetworkNamespace_HasAdapters(t *testing.T) { if err != nil { t.Fatalf("failed to remove adapter: %v", err) } - err = removeNetworkNamespace(context.Background(), t.Name()) + err = RemoveNetworkNamespace(context.Background(), t.Name()) if err != nil { t.Fatalf("should not have failed to delete empty namepace got: %v", err) } diff --git a/guest/runtime/hcsv2/process.go b/guest/runtime/hcsv2/process.go index e68a63070c..f39a177ae5 100644 --- a/guest/runtime/hcsv2/process.go +++ b/guest/runtime/hcsv2/process.go @@ -67,6 +67,8 @@ type containerProcess struct { writersCalled bool } +var _ Process = &containerProcess{} + // newProcess returns a containerProcess struct that has been initialized with // an outstanding wait for process exit, and post exit an outstanding wait for // process cleanup to release all resources once at least 1 waiter has @@ -262,6 +264,8 @@ type externalProcess struct { remove func(pid int) } +var _ Process = &externalProcess{} + func (ep *externalProcess) Kill(ctx context.Context, signal syscall.Signal) error { if err := syscall.Kill(int(ep.cmd.Process.Pid), signal); err != nil { if err == syscall.ESRCH { diff --git a/guest/runtime/hcsv2/standalone_container.go b/guest/runtime/hcsv2/standalone_container.go index 0e232e7eda..a3d2bc1ba9 100644 --- a/guest/runtime/hcsv2/standalone_container.go +++ b/guest/runtime/hcsv2/standalone_container.go @@ -103,7 +103,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec // Write resolv.conf if !specInternal.MountPresent("/etc/resolv.conf", spec.Mounts) { - ns := getOrAddNetworkNamespace(getNetworkNamespaceID(spec)) + ns := GetOrAddNetworkNamespace(getNetworkNamespaceID(spec)) var searches, servers []string for _, n := range ns.Adapters() { if len(n.DNSSuffix) > 0 { diff --git a/guest/runtime/hcsv2/uvm.go b/guest/runtime/hcsv2/uvm.go index 380112a00d..4725ae16f9 100644 --- a/guest/runtime/hcsv2/uvm.go +++ b/guest/runtime/hcsv2/uvm.go @@ -115,10 +115,29 @@ func (h *Host) SetSecurityPolicy(base64Policy string) error { return nil } +func (h *Host) SecurityPolicyEnforcer() securitypolicy.SecurityPolicyEnforcer { + return h.securityPolicyEnforcer +} + +func (h *Host) Transport() transport.Transport { + return h.vsock +} + func (h *Host) RemoveContainer(id string) { h.containersMutex.Lock() defer h.containersMutex.Unlock() + c, ok := h.containers[id] + if !ok { + return + } + + // delete the network namespace for standalone and sandbox containers + criType, isCRI := c.spec.Annotations[annotations.KubernetesContainerType] + if !isCRI || criType == "sandbox" { + RemoveNetworkNamespace(context.Background(), id) + } + delete(h.containers, id) } @@ -580,7 +599,7 @@ func modifyCombinedLayers(ctx context.Context, rt guestrequest.RequestType, cl * func modifyNetwork(ctx context.Context, rt guestrequest.RequestType, na *guestresource.LCOWNetworkAdapter) (err error) { switch rt { case guestrequest.RequestTypeAdd: - ns := getOrAddNetworkNamespace(na.NamespaceID) + ns := GetOrAddNetworkNamespace(na.NamespaceID) if err := ns.AddAdapter(ctx, na); err != nil { return err } @@ -588,7 +607,7 @@ func modifyNetwork(ctx context.Context, rt 
guestrequest.RequestType, na *guestre // container or not so it must always call `Sync`. return ns.Sync(ctx) case guestrequest.RequestTypeRemove: - ns := getOrAddNetworkNamespace(na.ID) + ns := GetOrAddNetworkNamespace(na.ID) if err := ns.RemoveAdapter(ctx, na.ID); err != nil { return err } diff --git a/guest/runtime/runc/container.go b/guest/runtime/runc/container.go new file mode 100644 index 0000000000..92f79f7b78 --- /dev/null +++ b/guest/runtime/runc/container.go @@ -0,0 +1,460 @@ +//go:build linux +// +build linux + +package runc + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/Microsoft/hcsshim/internal/guest/runtime" + "github.com/Microsoft/hcsshim/internal/guest/stdio" + "github.com/Microsoft/hcsshim/internal/logfields" + oci "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +type container struct { + r *runcRuntime + id string + init *process + // ownsPidNamespace indicates whether the container's init process is also + // the init process for its pid namespace. + ownsPidNamespace bool +} + +var _ runtime.Container = &container{} + +func (c *container) ID() string { + return c.id +} + +func (c *container) Pid() int { + return c.init.Pid() +} + +func (c *container) Tty() *stdio.TtyRelay { + return c.init.ttyRelay +} + +func (c *container) PipeRelay() *stdio.PipeRelay { + return c.init.pipeRelay +} + +// Start unblocks the container's init process created by the call to +// CreateContainer. +func (c *container) Start() error { + logPath := c.r.getLogPath(c.id) + args := []string{"start", c.id} + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + c.r.cleanupContainer(c.id) + return errors.Wrapf(runcErr, "runc start failed with %v: %s", err, string(out)) + } + + return nil +} + +// ExecProcess executes a new process, represented as an OCI process struct, +// inside an already-running container. +func (c *container) ExecProcess(process *oci.Process, stdioSet *stdio.ConnectionSet) (p runtime.Process, err error) { + p, err = c.runExecCommand(process, stdioSet) + if err != nil { + return nil, err + } + + return p, nil +} + +// Kill sends the specified signal to the container's init process. +func (c *container) Kill(signal syscall.Signal) error { + logrus.WithField(logfields.ContainerID, c.id).Debug("runc::container::Kill") + logPath := c.r.getLogPath(c.id) + args := []string{"kill"} + if signal == syscall.SIGTERM || signal == syscall.SIGKILL { + args = append(args, "--all") + } + args = append(args, c.id, strconv.Itoa(int(signal))) + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return errors.Wrapf(runcErr, "unknown runc error after kill %v: %s", err, string(out)) + } + + return nil +} + +// Delete deletes any state created for the container by either this wrapper or +// runC itself. +func (c *container) Delete() error { + logrus.WithField(logfields.ContainerID, c.id).Debug("runc::container::Delete") + logPath := c.r.getLogPath(c.id) + args := []string{"delete", c.id} + cmd := createRuncCommand(logPath, args...) 
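+	// Illustrative only: assuming createRuncCommand just prepends the global
+	// flags (the runc binary plus --log <logPath>), the command built above is
+	// roughly `runc --log <logPath> delete <c.id>`; the start/kill/pause/resume/
+	// state helpers in this file follow the same pattern.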
+ out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return errors.Wrapf(runcErr, "runc delete failed with %v: %s", err, string(out)) + } + if err := c.r.cleanupContainer(c.id); err != nil { + return err + } + + return nil +} + +// Pause suspends all processes running in the container. +func (c *container) Pause() error { + logPath := c.r.getLogPath(c.id) + args := []string{"pause", c.id} + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return errors.Wrapf(runcErr, "runc pause failed with %v: %s", err, string(out)) + } + return nil +} + +// Resume unsuspends processes running in the container. +func (c *container) Resume() error { + logPath := c.r.getLogPath(c.id) + args := []string{"resume", c.id} + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return errors.Wrapf(runcErr, "runc resume failed with %v: %s", err, string(out)) + } + return nil +} + +// GetState returns information about the given container. +func (c *container) GetState() (*runtime.ContainerState, error) { + logPath := c.r.getLogPath(c.id) + args := []string{"state", c.id} + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return nil, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) + } + var state runtime.ContainerState + if err := json.Unmarshal(out, &state); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal the state for container %s", c.id) + } + return &state, nil +} + +// Exists returns true if the container exists, false if it doesn't +// exist. +// It should be noted that containers that have stopped but have not been +// deleted are still considered to exist. +func (c *container) Exists() (bool, error) { + // use global path because container may not exist + logPath := c.r.getGlobalLogPath() + args := []string{"state", c.id} + cmd := createRuncCommand(logPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + if errors.Is(runcErr, runtime.ErrContainerDoesNotExist) { + return false, nil + } + return false, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) + } + return true, nil +} + +// GetRunningProcesses gets only the running processes associated with the given +// container. This excludes zombie processes. +func (c *container) GetRunningProcesses() ([]runtime.ContainerProcessState, error) { + pids, err := c.r.getRunningPids(c.id) + if err != nil { + return nil, err + } + + pidMap := map[int]*runtime.ContainerProcessState{} + // Initialize all processes with a pid and command, and mark correctly that + // none of them are zombies. Default CreatedByRuntime to false. + for _, pid := range pids { + command, err := c.r.getProcessCommand(pid) + if err != nil { + if errors.Is(err, unix.ENOENT) { + // process has exited between getting the running pids above + // and now, ignore error + continue + } + return nil, err + } + pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: false, IsZombie: false} + } + + // For each process state directory which corresponds to a running pid, set + // that the process was created by the Runtime. 
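+	// (Orientation, illustrative only: startProcess renames each started
+	// process's scratch directory to its pid, presumably landing at
+	// <containerFilesDir>/<c.id>/<pid>, so the entries read below are pid-named
+	// directories plus the initPidFilename entry; any entry whose name parses
+	// as a pid was created by this wrapper.)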
+ processDirs, err := ioutil.ReadDir(filepath.Join(containerFilesDir, c.id)) + if err != nil { + return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) + } + for _, processDir := range processDirs { + if processDir.Name() != initPidFilename { + pid, err := strconv.Atoi(processDir.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse string \"%s\" as pid", processDir.Name()) + } + if _, ok := pidMap[pid]; ok { + pidMap[pid].CreatedByRuntime = true + } + } + } + + return c.r.pidMapToProcessStates(pidMap), nil +} + +// GetAllProcesses gets all processes associated with the given container, +// including both running and zombie processes. +func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) { + runningPids, err := c.r.getRunningPids(c.id) + if err != nil { + return nil, err + } + + logrus.WithFields(logrus.Fields{ + "cid": c.id, + "pids": runningPids, + }).Debug("running container pids") + + pidMap := map[int]*runtime.ContainerProcessState{} + // Initialize all processes with a pid and command, leaving CreatedByRuntime + // and IsZombie at the default value of false. + for _, pid := range runningPids { + command, err := c.r.getProcessCommand(pid) + if err != nil { + if errors.Is(err, unix.ENOENT) { + // process has exited between getting the running pids above + // and now, ignore error + continue + } + return nil, err + } + pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: false, IsZombie: false} + } + + processDirs, err := ioutil.ReadDir(filepath.Join(containerFilesDir, c.id)) + if err != nil { + return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) + } + // Loop over every process state directory. Since these processes have + // process state directories, CreatedByRuntime will be true for all of them. + for _, processDir := range processDirs { + if processDir.Name() != initPidFilename { + pid, err := strconv.Atoi(processDir.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse string \"%s\" into pid", processDir.Name()) + } + if c.r.processExists(pid) { + // If the process exists in /proc and is in the pidMap, it must + // be a running non-zombie. + if _, ok := pidMap[pid]; ok { + pidMap[pid].CreatedByRuntime = true + } else { + // Otherwise, since it's in /proc but not running, it must + // be a zombie. + command, err := c.r.getProcessCommand(pid) + if err != nil { + if errors.Is(err, unix.ENOENT) { + // process has exited between checking that it exists and now, ignore error + continue + } + return nil, err + } + pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: true, IsZombie: true} + } + } + } + } + return c.r.pidMapToProcessStates(pidMap), nil +} + +// GetInitProcess gets the init process associated with the given container. +// It returns an error if the container has no init process. +func (c *container) GetInitProcess() (runtime.Process, error) { + if c.init == nil { + return nil, errors.New("container has no init process") + } + return c.init, nil +} + +// Wait waits on every non-init process in the container, and then performs a +// final wait on the init process. The exit code returned is the exit code +// acquired from waiting on the init process.
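+// A typical caller sequence, illustrative only:
+//
+//	exitCode, err := c.Wait() // reaps exec processes first, then init
+//	if err == nil {
+//		err = c.Delete() // then remove runc and wrapper state for the container
+//	}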
+func (c *container) Wait() (int, error) { + entity := logrus.WithField(logfields.ContainerID, c.id) + processes, err := c.GetAllProcesses() + if err != nil { + return -1, err + } + for _, process := range processes { + // Only wait on non-init processes that were created with exec. + if process.Pid != c.init.pid && process.CreatedByRuntime { + // FUTURE-jstarks: Consider waiting on the child process's relays as + // well (as in p.Wait()). This may not matter as long as the relays + // finish "soon" after Wait() returns since HCS expects the stdio + // connections to close before container shutdown can complete. + entity.WithField(logfields.ProcessID, process.Pid).Debug("waiting on container exec process") + c.r.waitOnProcess(process.Pid) + } + } + exitCode, err := c.init.Wait() + entity.Debug("runc::container::init process wait completed") + if err != nil { + return -1, err + } + return exitCode, nil +} + +// runExecCommand sets up the arguments for calling runc exec. +func (c *container) runExecCommand(processDef *oci.Process, stdioSet *stdio.ConnectionSet) (p runtime.Process, err error) { + // Create a temporary random directory to store the process's files. + tempProcessDir, err := ioutil.TempDir(containerFilesDir, c.id) + if err != nil { + return nil, err + } + + f, err := os.Create(filepath.Join(tempProcessDir, "process.json")) + if err != nil { + return nil, errors.Wrapf(err, "failed to create process.json file at %s", filepath.Join(tempProcessDir, "process.json")) + } + defer f.Close() + if err := json.NewEncoder(f).Encode(processDef); err != nil { + return nil, errors.Wrap(err, "failed to encode JSON into process.json file") + } + + args := []string{"exec"} + args = append(args, "-d", "--process", filepath.Join(tempProcessDir, "process.json")) + return c.startProcess(tempProcessDir, processDef.Terminal, stdioSet, args...) +} + +// startProcess performs the operations necessary to start a container process +// and properly handle its stdio. This function is used by both CreateContainer +// and ExecProcess. For V2 container creation stdioSet will be nil; in that case +// the caller is expected to start the relay prior to calling Start on +// the container. +func (c *container) startProcess(tempProcessDir string, hasTerminal bool, stdioSet *stdio.ConnectionSet, initialArgs ...string) (p *process, err error) { + args := initialArgs + + if err := setSubreaper(1); err != nil { + return nil, errors.Wrapf(err, "failed to set process as subreaper for process in container %s", c.id) + } + if err := c.r.makeLogDir(c.id); err != nil { + return nil, err + } + + logPath := c.r.getLogPath(c.id) + args = append(args, "--pid-file", filepath.Join(tempProcessDir, "pid")) + + var sockListener *net.UnixListener + if hasTerminal { + var consoleSockPath string + sockListener, consoleSockPath, err = c.r.createConsoleSocket(tempProcessDir) + if err != nil { + return nil, errors.Wrapf(err, "failed to create console socket for container %s", c.id) + } + defer sockListener.Close() + args = append(args, "--console-socket", consoleSockPath) + } + args = append(args, c.id) + + cmd := createRuncCommand(logPath, args...)
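+	// Illustrative only: for an exec of a process without a terminal, the
+	// argument list assembled above is roughly
+	//   exec -d --process <tempProcessDir>/process.json --pid-file <tempProcessDir>/pid <c.id>
+	// with --console-socket <path> appended before the id when hasTerminal is set.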
+ + var pipeRelay *stdio.PipeRelay + if !hasTerminal { + pipeRelay, err = stdio.NewPipeRelay(stdioSet) + if err != nil { + return nil, errors.Wrapf(err, "failed to create a pipe relay connection set for container %s", c.id) + } + fileSet, err := pipeRelay.Files() + if err != nil { + return nil, errors.Wrapf(err, "failed to get files for connection set for container %s", c.id) + } + // Closing the FileSet here is fine as that end of the pipes will have + // already been copied into the child process. + defer fileSet.Close() + if fileSet.In != nil { + cmd.Stdin = fileSet.In + } + if fileSet.Out != nil { + cmd.Stdout = fileSet.Out + } + if fileSet.Err != nil { + cmd.Stderr = fileSet.Err + } + } + + if err := cmd.Run(); err != nil { + runcErr := getRuncLogError(logPath) + return nil, errors.Wrapf(runcErr, "failed to run runc create/exec call for container %s with %v", c.id, err) + } + + var ttyRelay *stdio.TtyRelay + if hasTerminal { + var master *os.File + master, err = c.r.getMasterFromSocket(sockListener) + if err != nil { + cmd.Process.Kill() + return nil, errors.Wrapf(err, "failed to get pty master for process in container %s", c.id) + } + // Keep master open for the relay unless there is an error. + defer func() { + if err != nil { + master.Close() + } + }() + ttyRelay = stdio.NewTtyRelay(stdioSet, master) + } + + // Rename the process's directory to its pid. + pid, err := c.r.readPidFile(filepath.Join(tempProcessDir, "pid")) + if err != nil { + return nil, err + } + if err := os.Rename(tempProcessDir, c.r.getProcessDir(c.id, pid)); err != nil { + return nil, err + } + + if ttyRelay != nil && stdioSet != nil { + ttyRelay.Start() + } + if pipeRelay != nil && stdioSet != nil { + pipeRelay.Start() + } + return &process{c: c, pid: pid, ttyRelay: ttyRelay, pipeRelay: pipeRelay}, nil +} + +func (c *container) Update(resources interface{}) error { + jsonResources, err := json.Marshal(resources) + if err != nil { + return err + } + logPath := c.r.getLogPath(c.id) + args := []string{"update", "--resources", "-", c.id} + cmd := createRuncCommand(logPath, args...) + cmd.Stdin = strings.NewReader(string(jsonResources)) + out, err := cmd.CombinedOutput() + if err != nil { + runcErr := getRuncLogError(logPath) + return errors.Wrapf(runcErr, "runc update request %s failed with %v: %s", string(jsonResources), err, string(out)) + } + return nil +} diff --git a/guest/runtime/runc/process.go b/guest/runtime/runc/process.go new file mode 100644 index 0000000000..05e8525e4a --- /dev/null +++ b/guest/runtime/runc/process.go @@ -0,0 +1,94 @@ +//go:build linux +// +build linux + +package runc + +import ( + "syscall" + + "github.com/Microsoft/hcsshim/internal/guest/runtime" + "github.com/Microsoft/hcsshim/internal/guest/stdio" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" +) + +// process represents a process running in a container. It can either be a +// container's init process, or an exec process in a container. +type process struct { + c *container + pid int + ttyRelay *stdio.TtyRelay + pipeRelay *stdio.PipeRelay +} + +var _ runtime.Process = &process{} + +func (p *process) Pid() int { + return p.pid +} + +func (p *process) Tty() *stdio.TtyRelay { + return p.ttyRelay +} + +func (p *process) PipeRelay() *stdio.PipeRelay { + return p.pipeRelay +} + +// Delete deletes any state created for the process by either this wrapper or +// runC itself. 
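+// (cleanupProcess is not shown in this diff; presumably it removes the
+// pid-named state directory that startProcess created, which is what keeps the
+// GetRunningProcesses/GetAllProcesses scans in container.go from picking up
+// stale entries.)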
+func (p *process) Delete() error { + if err := p.c.r.cleanupProcess(p.c.id, p.pid); err != nil { + return err + } + return nil +} + +func (p *process) Wait() (int, error) { + exitCode, err := p.c.r.waitOnProcess(p.pid) + + l := logrus.WithField(logfields.ContainerID, p.c.id) + l.WithField(logfields.ProcessID, p.pid).Debug("process wait completed") + + // If the init process for the container has exited, kill everything else in + // the container. Runc uses the devices cgroup of the container to determine + // what other processes to kill. + // + // We don't issue the kill if the container owns its own pid namespace, + // because in that case the container kernel will kill everything in the pid + // namespace automatically (as the container init will be the pid namespace + // init). This prevents a potential issue where two containers share cgroups + // but have their own pid namespaces. If we didn't handle this case, runc + // would kill the processes in both containers when trying to kill + // either one of them. + if p == p.c.init && !p.c.ownsPidNamespace { + // If the init process of a pid namespace terminates, the kernel + // terminates all other processes in the namespace with SIGKILL. We + // simulate the same behavior. + if err := p.c.Kill(syscall.SIGKILL); err != nil { + l.WithError(err).Error("failed to terminate container after process wait") + } + } + + // Wait on the relay to drain any output that was already buffered. + // + // At this point, if this is the init process for the container, everything + // else in the container has been killed, so the write ends of the stdio + // relay will have been closed. + // + // If this is a container exec process instead, then it is possible the + // relay waits will hang waiting for the write ends to close. This can occur + // if the exec spawned any child processes that inherited its stdio. + // Currently we do not do anything to avoid hanging in this case, but in the + // future we could add special handling. + if p.ttyRelay != nil { + p.ttyRelay.Wait() + } + if p.pipeRelay != nil { + p.pipeRelay.Wait() + } + + l.WithField(logfields.ProcessID, p.pid).Debug("relay wait completed") + + return exitCode, err +} diff --git a/guest/runtime/runc/runc.go b/guest/runtime/runc/runc.go index 2e31c26cfb..633263c710 100644 --- a/guest/runtime/runc/runc.go +++ b/guest/runtime/runc/runc.go @@ -6,7 +6,6 @@ package runc import ( "encoding/json" "io/ioutil" - "net" "os" "path" "path/filepath" @@ -17,10 +16,8 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/commonutils" "github.com/Microsoft/hcsshim/internal/guest/runtime" "github.com/Microsoft/hcsshim/internal/guest/stdio" - "github.com/Microsoft/hcsshim/internal/logfields" oci "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -33,67 +30,8 @@ func setSubreaper(i int) error { return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } -// runcRuntime is an implementation of the Runtime interface which uses runC as -// the container runtime. -type runcRuntime struct { - runcLogBasePath string -} - -var _ runtime.Runtime = &runcRuntime{} - -type container struct { - r *runcRuntime - id string - init *process - // ownsPidNamespace indicates whether the container's init process is also - // the init process for its pid namespace.
- ownsPidNamespace bool -} - -var _ runtime.Container = &container{} - -func (c *container) ID() string { - return c.id -} - -func (c *container) Pid() int { - return c.init.Pid() -} - -func (c *container) Tty() *stdio.TtyRelay { - return c.init.ttyRelay -} - -func (c *container) PipeRelay() *stdio.PipeRelay { - return c.init.pipeRelay -} - -// process represents a process running in a container. It can either be a -// container's init process, or an exec process in a container. -type process struct { - c *container - pid int - ttyRelay *stdio.TtyRelay - pipeRelay *stdio.PipeRelay -} - -var _ runtime.Process = &process{} - -func (p *process) Pid() int { - return p.pid -} - -func (p *process) Tty() *stdio.TtyRelay { - return p.ttyRelay -} - -func (p *process) PipeRelay() *stdio.PipeRelay { - return p.pipeRelay -} - // NewRuntime instantiates a new runcRuntime struct. func NewRuntime(logBasePath string) (runtime.Runtime, error) { - rtime := &runcRuntime{runcLogBasePath: logBasePath} if err := rtime.initialize(); err != nil { return nil, err @@ -101,6 +39,14 @@ func NewRuntime(logBasePath string) (runtime.Runtime, error) { return rtime, nil } +// runcRuntime is an implementation of the Runtime interface which uses runC as +// the container runtime. +type runcRuntime struct { + runcLogBasePath string +} + +var _ runtime.Runtime = &runcRuntime{} + // initialize sets up any state necessary for the runcRuntime to function. func (r *runcRuntime) initialize() error { paths := [2]string{containerFilesDir, r.runcLogBasePath} @@ -132,139 +78,6 @@ func (r *runcRuntime) CreateContainer(id string, bundlePath string, stdioSet *st return c, nil } -// Start unblocks the container's init process created by the call to -// CreateContainer. -func (c *container) Start() error { - logPath := c.r.getLogPath(c.id) - args := []string{"start", c.id} - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - c.r.cleanupContainer(c.id) - return errors.Wrapf(runcErr, "runc start failed with %v: %s", err, string(out)) - } - return nil -} - -// ExecProcess executes a new process, represented as an OCI process struct, -// inside an already-running container. -func (c *container) ExecProcess(process *oci.Process, stdioSet *stdio.ConnectionSet) (p runtime.Process, err error) { - p, err = c.runExecCommand(process, stdioSet) - if err != nil { - return nil, err - } - return p, nil -} - -// Kill sends the specified signal to the container's init process. -func (c *container) Kill(signal syscall.Signal) error { - logrus.WithField(logfields.ContainerID, c.id).Debug("runc::container::Kill") - logPath := c.r.getLogPath(c.id) - args := []string{"kill"} - if signal == syscall.SIGTERM || signal == syscall.SIGKILL { - args = append(args, "--all") - } - args = append(args, c.id, strconv.Itoa(int(signal))) - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "unknown runc error after kill %v: %s", err, string(out)) - } - return nil -} - -// Delete deletes any state created for the container by either this wrapper or -// runC itself. -func (c *container) Delete() error { - logrus.WithField(logfields.ContainerID, c.id).Debug("runc::container::Delete") - logPath := c.r.getLogPath(c.id) - args := []string{"delete", c.id} - cmd := createRuncCommand(logPath, args...) 
- out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "runc delete failed with %v: %s", err, string(out)) - } - if err := c.r.cleanupContainer(c.id); err != nil { - return err - } - return nil -} - -// Delete deletes any state created for the process by either this wrapper or -// runC itself. -func (p *process) Delete() error { - if err := p.c.r.cleanupProcess(p.c.id, p.pid); err != nil { - return err - } - return nil -} - -// Pause suspends all processes running in the container. -func (c *container) Pause() error { - logPath := c.r.getLogPath(c.id) - args := []string{"pause", c.id} - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "runc pause failed with %v: %s", err, string(out)) - } - return nil -} - -// Resume unsuspends processes running in the container. -func (c *container) Resume() error { - logPath := c.r.getLogPath(c.id) - args := []string{"resume", c.id} - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "runc resume failed with %v: %s", err, string(out)) - } - return nil -} - -// GetState returns information about the given container. -func (c *container) GetState() (*runtime.ContainerState, error) { - logPath := c.r.getLogPath(c.id) - args := []string{"state", c.id} - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return nil, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) - } - var state runtime.ContainerState - if err := json.Unmarshal(out, &state); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal the state for container %s", c.id) - } - return &state, nil -} - -// Exists returns true if the container exists, false if it doesn't -// exist. -// It should be noted that containers that have stopped but have not been -// deleted are still considered to exist. -func (c *container) Exists() (bool, error) { - // use global path because container may not exist - logPath := c.r.getGlobalLogPath() - args := []string{"state", c.id} - cmd := createRuncCommand(logPath, args...) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - if errors.Is(runcErr, runtime.ErrContainerDoesNotExist) { - return false, nil - } - return false, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) - } - return true, nil -} - // ListContainerStates returns ContainerState structs for all existing // containers, whether they're running or not. func (r *runcRuntime) ListContainerStates() ([]runtime.ContainerState, error) { @@ -283,125 +96,6 @@ func (r *runcRuntime) ListContainerStates() ([]runtime.ContainerState, error) { return states, nil } -// GetRunningProcesses gets only the running processes associated with the given -// container. This excludes zombie processes. -func (c *container) GetRunningProcesses() ([]runtime.ContainerProcessState, error) { - pids, err := c.r.getRunningPids(c.id) - if err != nil { - return nil, err - } - - pidMap := map[int]*runtime.ContainerProcessState{} - // Initialize all processes with a pid and command, and mark correctly that - // none of them are zombies. Default CreatedByRuntime to false. 
- for _, pid := range pids { - command, err := c.r.getProcessCommand(pid) - if err != nil { - if errors.Is(err, unix.ENOENT) { - // process has exited between getting the running pids above - // and now, ignore error - continue - } - return nil, err - } - pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: false, IsZombie: false} - } - - // For each process state directory which corresponds to a running pid, set - // that the process was created by the Runtime. - processDirs, err := ioutil.ReadDir(filepath.Join(containerFilesDir, c.id)) - if err != nil { - return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) - } - for _, processDir := range processDirs { - if processDir.Name() != initPidFilename { - pid, err := strconv.Atoi(processDir.Name()) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse string \"%s\" as pid", processDir.Name()) - } - if _, ok := pidMap[pid]; ok { - pidMap[pid].CreatedByRuntime = true - } - } - } - - return c.r.pidMapToProcessStates(pidMap), nil -} - -// GetAllProcesses gets all processes associated with the given container, -// including both running and zombie processes. -func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) { - runningPids, err := c.r.getRunningPids(c.id) - if err != nil { - return nil, err - } - - logrus.WithFields(logrus.Fields{ - "cid": c.id, - "pids": runningPids, - }).Debug("running container pids") - - pidMap := map[int]*runtime.ContainerProcessState{} - // Initialize all processes with a pid and command, leaving CreatedByRuntime - // and IsZombie at the default value of false. - for _, pid := range runningPids { - command, err := c.r.getProcessCommand(pid) - if err != nil { - if errors.Is(err, unix.ENOENT) { - // process has exited between getting the running pids above - // and now, ignore error - continue - } - return nil, err - } - pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: false, IsZombie: false} - } - - processDirs, err := ioutil.ReadDir(filepath.Join(containerFilesDir, c.id)) - if err != nil { - return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) - } - // Loop over every process state directory. Since these processes have - // process state directories, CreatedByRuntime will be true for all of them. - for _, processDir := range processDirs { - if processDir.Name() != initPidFilename { - pid, err := strconv.Atoi(processDir.Name()) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse string \"%s\" into pid", processDir.Name()) - } - if c.r.processExists(pid) { - // If the process exists in /proc and is in the pidMap, it must - // be a running non-zombie. - if _, ok := pidMap[pid]; ok { - pidMap[pid].CreatedByRuntime = true - } else { - // Otherwise, since it's in /proc but not running, it must - // be a zombie. - command, err := c.r.getProcessCommand(pid) - if err != nil { - if errors.Is(err, unix.ENOENT) { - // process has exited between checking that it exists and now, ignore error - continue - } - return nil, err - } - pidMap[pid] = &runtime.ContainerProcessState{Pid: pid, Command: command, CreatedByRuntime: true, IsZombie: true} - } - } - } - } - return c.r.pidMapToProcessStates(pidMap), nil -} - -// GetInitProcess gets the init processes associated with the given container, -// including both running and zombie processes. 
-func (c *container) GetInitProcess() (runtime.Process, error) { - if c.init == nil { - return nil, errors.New("container has no init process") - } - return c.init, nil -} - // getRunningPids gets the pids of all processes which runC recognizes as // running. func (r *runcRuntime) getRunningPids(id string) ([]int, error) { @@ -464,83 +158,6 @@ func (r *runcRuntime) waitOnProcess(pid int) (int, error) { return status.ExitStatus(), nil } -func (p *process) Wait() (int, error) { - exitCode, err := p.c.r.waitOnProcess(p.pid) - - l := logrus.WithField(logfields.ContainerID, p.c.id) - l.WithField(logfields.ContainerID, p.pid).Debug("process wait completed") - - // If the init process for the container has exited, kill everything else in - // the container. Runc uses the devices cgroup of the container ot determine - // what other processes to kill. - // - // We don't issue the kill if the container owns its own pid namespace, - // because in that case the container kernel will kill everything in the pid - // namespace automatically (as the container init will be the pid namespace - // init). This prevents a potential issue where two containers share cgroups - // but have their own pid namespaces. If we didn't handle this case, runc - // would kill the processes in both containers when trying to kill - // either one of them. - if p == p.c.init && !p.c.ownsPidNamespace { - // If the init process of a pid namespace terminates, the kernel - // terminates all other processes in the namespace with SIGKILL. We - // simulate the same behavior. - if err := p.c.Kill(syscall.SIGKILL); err != nil { - l.WithError(err).Error("failed to terminate container after process wait") - } - } - - // Wait on the relay to drain any output that was already buffered. - // - // At this point, if this is the init process for the container, everything - // else in the container has been killed, so the write ends of the stdio - // relay will have been closed. - // - // If this is a container exec process instead, then it is possible the - // relay waits will hang waiting for the write ends to close. This can occur - // if the exec spawned any child processes that inherited its stdio. - // Currently we do not do anything to avoid hanging in this case, but in the - // future we could add special handling. - if p.ttyRelay != nil { - p.ttyRelay.Wait() - } - if p.pipeRelay != nil { - p.pipeRelay.Wait() - } - - l.WithField(logfields.ProcessID, p.pid).Debug("relay wait completed") - - return exitCode, err -} - -// Wait waits on every non-init process in the container, and then performs a -// final wait on the init process. The exit code returned is the exit code -// acquired from waiting on the init process. -func (c *container) Wait() (int, error) { - entity := logrus.WithField(logfields.ContainerID, c.id) - processes, err := c.GetAllProcesses() - if err != nil { - return -1, err - } - for _, process := range processes { - // Only wait on non-init processes that were created with exec. - if process.Pid != c.init.pid && process.CreatedByRuntime { - // FUTURE-jstarks: Consider waiting on the child process's relays as - // well (as in p.Wait()). This may not matter as long as the relays - // finish "soon" after Wait() returns since HCS expects the stdio - // connections to close before container shutdown can complete. 
- entity.WithField(logfields.ProcessID, process.Pid).Debug("waiting on container exec process") - c.r.waitOnProcess(process.Pid) - } - } - exitCode, err := c.init.Wait() - entity.Debug("runc::container::init process wait completed") - if err != nil { - return -1, err - } - return exitCode, nil -} - // runCreateCommand sets up the arguments for calling runc create. func (r *runcRuntime) runCreateCommand(id string, bundlePath string, stdioSet *stdio.ConnectionSet) (runtime.Container, error) { c := &container{r: r, id: id} @@ -609,138 +226,3 @@ func ociSpecFromBundle(bundlePath string) (*oci.Spec, error) { } return spec, nil } - -// runExecCommand sets up the arguments for calling runc exec. -func (c *container) runExecCommand(processDef *oci.Process, stdioSet *stdio.ConnectionSet) (p runtime.Process, err error) { - // Create a temporary random directory to store the process's files. - tempProcessDir, err := ioutil.TempDir(containerFilesDir, c.id) - if err != nil { - return nil, err - } - - f, err := os.Create(filepath.Join(tempProcessDir, "process.json")) - if err != nil { - return nil, errors.Wrapf(err, "failed to create process.json file at %s", filepath.Join(tempProcessDir, "process.json")) - } - defer f.Close() - if err := json.NewEncoder(f).Encode(processDef); err != nil { - return nil, errors.Wrap(err, "failed to encode JSON into process.json file") - } - - args := []string{"exec"} - args = append(args, "-d", "--process", filepath.Join(tempProcessDir, "process.json")) - return c.startProcess(tempProcessDir, processDef.Terminal, stdioSet, args...) -} - -// startProcess performs the operations necessary to start a container process -// and properly handle its stdio. This function is used by both CreateContainer -// and ExecProcess. For V2 container creation stdioSet will be nil, in this case -// it is expected that the caller starts the relay previous to calling Start on -// the container. -func (c *container) startProcess(tempProcessDir string, hasTerminal bool, stdioSet *stdio.ConnectionSet, initialArgs ...string) (p *process, err error) { - args := initialArgs - - if err := setSubreaper(1); err != nil { - return nil, errors.Wrapf(err, "failed to set process as subreaper for process in container %s", c.id) - } - if err := c.r.makeLogDir(c.id); err != nil { - return nil, err - } - - logPath := c.r.getLogPath(c.id) - args = append(args, "--pid-file", filepath.Join(tempProcessDir, "pid")) - - var sockListener *net.UnixListener - if hasTerminal { - var consoleSockPath string - sockListener, consoleSockPath, err = c.r.createConsoleSocket(tempProcessDir) - if err != nil { - return nil, errors.Wrapf(err, "failed to create console socket for container %s", c.id) - } - defer sockListener.Close() - args = append(args, "--console-socket", consoleSockPath) - } - args = append(args, c.id) - - cmd := createRuncCommand(logPath, args...) - - var pipeRelay *stdio.PipeRelay - if !hasTerminal { - pipeRelay, err = stdio.NewPipeRelay(stdioSet) - if err != nil { - return nil, errors.Wrapf(err, "failed to create a pipe relay connection set for container %s", c.id) - } - fileSet, err := pipeRelay.Files() - if err != nil { - return nil, errors.Wrapf(err, "failed to get files for connection set for container %s", c.id) - } - // Closing the FileSet here is fine as that end of the pipes will have - // already been copied into the child process. 
- defer fileSet.Close() - if fileSet.In != nil { - cmd.Stdin = fileSet.In - } - if fileSet.Out != nil { - cmd.Stdout = fileSet.Out - } - if fileSet.Err != nil { - cmd.Stderr = fileSet.Err - } - } - - if err := cmd.Run(); err != nil { - runcErr := getRuncLogError(logPath) - return nil, errors.Wrapf(runcErr, "failed to run runc create/exec call for container %s with %v", c.id, err) - } - - var ttyRelay *stdio.TtyRelay - if hasTerminal { - var master *os.File - master, err = c.r.getMasterFromSocket(sockListener) - if err != nil { - cmd.Process.Kill() - return nil, errors.Wrapf(err, "failed to get pty master for process in container %s", c.id) - } - // Keep master open for the relay unless there is an error. - defer func() { - if err != nil { - master.Close() - } - }() - ttyRelay = stdio.NewTtyRelay(stdioSet, master) - } - - // Rename the process's directory to its pid. - pid, err := c.r.readPidFile(filepath.Join(tempProcessDir, "pid")) - if err != nil { - return nil, err - } - if err := os.Rename(tempProcessDir, c.r.getProcessDir(c.id, pid)); err != nil { - return nil, err - } - - if ttyRelay != nil && stdioSet != nil { - ttyRelay.Start() - } - if pipeRelay != nil && stdioSet != nil { - pipeRelay.Start() - } - return &process{c: c, pid: pid, ttyRelay: ttyRelay, pipeRelay: pipeRelay}, nil -} - -func (c *container) Update(resources interface{}) error { - jsonResources, err := json.Marshal(resources) - if err != nil { - return err - } - logPath := c.r.getLogPath(c.id) - args := []string{"update", "--resources", "-", c.id} - cmd := createRuncCommand(logPath, args...) - cmd.Stdin = strings.NewReader(string(jsonResources)) - out, err := cmd.CombinedOutput() - if err != nil { - runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "runc update request %s failed with %v: %s", string(jsonResources), err, string(out)) - } - return nil -}
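Reviewer note: the runc.go hunks above are an essentially mechanical split of the old file — container methods move to container.go, process methods to process.go. A minimal sketch of the public surface the split must preserve; the paths and ids are placeholders, error handling is trimmed, and the nil stdioSet follows the V2 flow in which the caller starts the relays before calling Start:

	package main

	import (
		"log"

		"github.com/Microsoft/hcsshim/internal/guest/runtime/runc"
	)

	func main() {
		rt, err := runc.NewRuntime("/run/gcs/c/logs") // hypothetical log base path
		if err != nil {
			log.Fatal(err)
		}
		c, err := rt.CreateContainer("c1", "/run/gcs/c/c1/bundle", nil)
		if err != nil {
			log.Fatal(err)
		}
		if err := c.Start(); err != nil {
			log.Fatal(err)
		}
		exitCode, err := c.Wait() // waits on exec processes, then on init
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("init process exited with %d", exitCode)
		if err := c.Delete(); err != nil {
			log.Fatal(err)
		}
	}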