diff --git a/cmd/containerd-shim-runhcs-v1/delete.go b/cmd/containerd-shim-runhcs-v1/delete.go index 5c8f8313e4..fadd1c5594 100644 --- a/cmd/containerd-shim-runhcs-v1/delete.go +++ b/cmd/containerd-shim-runhcs-v1/delete.go @@ -4,13 +4,13 @@ package main import ( "context" + "errors" "fmt" "os" "path/filepath" "time" task "github.com/containerd/containerd/api/runtime/task/v2" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" "google.golang.org/protobuf/proto" @@ -27,7 +27,7 @@ import ( func limitedRead(filePath string, readLimitBytes int64) ([]byte, error) { f, err := os.Open(filePath) if err != nil { - return nil, errors.Wrapf(err, "limited read failed to open file: %s", filePath) + return nil, err } defer f.Close() if fi, err := f.Stat(); err == nil { @@ -37,11 +37,11 @@ func limitedRead(filePath string, readLimitBytes int64) ([]byte, error) { buf := make([]byte, readLimitBytes) _, err := f.Read(buf) if err != nil { - return []byte{}, errors.Wrapf(err, "limited read failed during file read: %s", filePath) + return nil, err } return buf, nil } - return []byte{}, errors.Wrapf(err, "limited read failed during file stat: %s", filePath) + return nil, err } var deleteCommand = cli.Command{ diff --git a/cmd/containerd-shim-runhcs-v1/exec.go b/cmd/containerd-shim-runhcs-v1/exec.go index 8f456aa680..4da26cd534 100644 --- a/cmd/containerd-shim-runhcs-v1/exec.go +++ b/cmd/containerd-shim-runhcs-v1/exec.go @@ -4,10 +4,10 @@ package main import ( "context" + "fmt" task "github.com/containerd/containerd/api/runtime/task/v2" "github.com/containerd/errdefs" - "github.com/pkg/errors" ) type shimExecState string @@ -86,11 +86,6 @@ type shimExec interface { } func newExecInvalidStateError(tid, eid string, state shimExecState, op string) error { - return errors.Wrapf( - errdefs.ErrFailedPrecondition, - "exec: '%s' in task: '%s' is in invalid state: '%s' for %s", - eid, - tid, - state, - op) + return fmt.Errorf("exec: %q in task: %q is in invalid state: %q for %s: %w", + eid, tid, state, op, errdefs.ErrFailedPrecondition) } diff --git a/cmd/containerd-shim-runhcs-v1/exec_hcs.go b/cmd/containerd-shim-runhcs-v1/exec_hcs.go index 84cdb38d24..8a0e86e8d1 100644 --- a/cmd/containerd-shim-runhcs-v1/exec_hcs.go +++ b/cmd/containerd-shim-runhcs-v1/exec_hcs.go @@ -4,6 +4,7 @@ package main import ( "context" + "fmt" "sync" "time" @@ -13,7 +14,6 @@ import ( "github.com/containerd/containerd/runtime" "github.com/containerd/errdefs" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.opencensus.io/trace" "google.golang.org/protobuf/types/known/timestamppb" @@ -42,7 +42,8 @@ func newHcsExec( id, bundle string, isWCOW bool, spec *specs.Process, - io cmd.UpstreamIO) shimExec { + io cmd.UpstreamIO, +) shimExec { log.G(ctx).WithFields(logrus.Fields{ "tid": tid, "eid": id, // Init exec ID is always same as Task ID @@ -287,7 +288,7 @@ func (he *hcsExec) Kill(ctx context.Context, signal uint32) error { } } if err != nil { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "signal %d: %v", signal, err) + return fmt.Errorf("signal %d: %w: %w", signal, err, errdefs.ErrFailedPrecondition) } var delivered bool if supported && options != nil { @@ -331,11 +332,11 @@ func (he *hcsExec) Kill(ctx context.Context, signal uint32) error { return err } if !delivered { - return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", he.id, he.tid) + return fmt.Errorf("exec: %q in task: %q: %w", he.id, he.tid, errdefs.ErrNotFound) } return nil 
case shimExecStateExited: - return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", he.id, he.tid) + return fmt.Errorf("exec: %q in task: %q: %w", he.id, he.tid, errdefs.ErrNotFound) default: return newExecInvalidStateError(he.tid, he.id, he.state, "kill") } @@ -345,7 +346,7 @@ func (he *hcsExec) ResizePty(ctx context.Context, width, height uint32) error { he.sl.Lock() defer he.sl.Unlock() if !he.io.Terminal() { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '%s' in task: '%s' is not a tty", he.id, he.tid) + return fmt.Errorf("exec: %q in task: %q is not a tty: %w", he.id, he.tid, errdefs.ErrFailedPrecondition) } if he.state == shimExecStateRunning { diff --git a/cmd/containerd-shim-runhcs-v1/exec_wcow_podsandbox.go b/cmd/containerd-shim-runhcs-v1/exec_wcow_podsandbox.go index f7df0a390e..a8a27cb5c6 100644 --- a/cmd/containerd-shim-runhcs-v1/exec_wcow_podsandbox.go +++ b/cmd/containerd-shim-runhcs-v1/exec_wcow_podsandbox.go @@ -4,6 +4,7 @@ package main import ( "context" + "fmt" "sync" "time" @@ -13,7 +14,6 @@ import ( containerd_v1_types "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/runtime" "github.com/containerd/errdefs" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -166,7 +166,7 @@ func (wpse *wcowPodSandboxExec) Kill(ctx context.Context, signal uint32) error { close(wpse.exited) return nil case shimExecStateExited: - return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", wpse.tid, wpse.tid) + return fmt.Errorf("exec: %q in task: %q: %w", wpse.tid, wpse.tid, errdefs.ErrNotFound) default: return newExecInvalidStateError(wpse.tid, wpse.tid, wpse.state, "kill") } @@ -177,7 +177,7 @@ func (wpse *wcowPodSandboxExec) ResizePty(ctx context.Context, width, height uin defer wpse.sl.Unlock() // We will never have IO for a sandbox container so we wont have a tty // either. 
- return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '%s' in task: '%s' is not a tty", wpse.tid, wpse.tid) + return fmt.Errorf("exec: %q in task: %q is not a tty: %w", wpse.tid, wpse.tid, errdefs.ErrFailedPrecondition) } func (wpse *wcowPodSandboxExec) CloseIO(ctx context.Context, stdin bool) error { diff --git a/cmd/containerd-shim-runhcs-v1/pod.go b/cmd/containerd-shim-runhcs-v1/pod.go index 1d2551ee4d..a9ec297ad7 100644 --- a/cmd/containerd-shim-runhcs-v1/pod.go +++ b/cmd/containerd-shim-runhcs-v1/pod.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -21,7 +22,6 @@ import ( "github.com/containerd/containerd/runtime" "github.com/containerd/errdefs" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -73,7 +73,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques log.G(ctx).WithField("tid", req.ID).Debug("createPod") if osversion.Build() < osversion.RS5 { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "pod support is not available on Windows versions previous to RS5 (%d)", osversion.RS5) + return nil, fmt.Errorf("pod support is not available on Windows versions previous to RS5 (%d): %w", osversion.RS5, errdefs.ErrFailedPrecondition) } ct, sid, err := oci.GetSandboxTypeAndID(s.Annotations) @@ -81,20 +81,20 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques return nil, err } if ct != oci.KubernetesContainerTypeSandbox { - return nil, errors.Wrapf( - errdefs.ErrFailedPrecondition, - "expected annotation: '%s': '%s' got '%s'", + return nil, fmt.Errorf( + "expected annotation: %q: %q, got %q: %w", annotations.KubernetesContainerType, oci.KubernetesContainerTypeSandbox, - ct) + ct, + errdefs.ErrFailedPrecondition) } if sid != req.ID { - return nil, errors.Wrapf( - errdefs.ErrFailedPrecondition, - "expected annotation '%s': '%s' got '%s'", + return nil, fmt.Errorf( + "expected annotation %q: %q, got %q: %w", annotations.KubernetesSandboxID, req.ID, - sid) + sid, + errdefs.ErrFailedPrecondition) } owner := filepath.Base(os.Args[0]) @@ -168,7 +168,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques p.jobContainer = true return &p, nil } else if !isWCOW { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "oci spec does not contain WCOW or LCOW spec") + return nil, fmt.Errorf("oci spec does not contain WCOW or LCOW spec: %w", errdefs.ErrFailedPrecondition) } defer func() { @@ -208,7 +208,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques if nsid != "" { if err := parent.ConfigureNetworking(ctx, nsid); err != nil { - return nil, errors.Wrapf(err, "failed to setup networking for pod %q", req.ID) + return nil, fmt.Errorf("failed to setup networking for pod %q: %w", req.ID, err) } } p.sandboxTask = newWcowPodSandboxTask(ctx, events, req.ID, req.Bundle, parent, nsid) @@ -297,16 +297,16 @@ func (p *pod) ID() string { func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (_ shimTask, err error) { if req.ID == p.id { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists", req.ID) + return nil, fmt.Errorf("task with id: %q: %w", req.ID, errdefs.ErrAlreadyExists) } e, _ := p.sandboxTask.GetExec("") if e.State() != shimExecStateRunning { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task with id: '%s' cannot be created in pod: '%s' which is not running", req.ID, p.id) + 
return nil, fmt.Errorf("task with id: %q cannot be created in pod: %q which is not running: %w", req.ID, p.id, errdefs.ErrFailedPrecondition) } _, ok := p.workloadTasks.Load(req.ID) if ok { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists id pod: '%s'", req.ID, p.id) + return nil, fmt.Errorf("task with id: %q already exists in pod: %q: %w", req.ID, p.id, errdefs.ErrAlreadyExists) } if p.jobContainer { @@ -334,20 +334,20 @@ func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *sp return nil, err } if ct != oci.KubernetesContainerTypeContainer { - return nil, errors.Wrapf( - errdefs.ErrFailedPrecondition, - "expected annotation: '%s': '%s' got '%s'", + return nil, fmt.Errorf( + "expected annotation: %q: %q, got %q: %w", annotations.KubernetesContainerType, oci.KubernetesContainerTypeContainer, - ct) + ct, + errdefs.ErrFailedPrecondition) } if sid != p.id { - return nil, errors.Wrapf( - errdefs.ErrFailedPrecondition, - "expected annotation '%s': '%s' got '%s'", + return nil, fmt.Errorf( + "expected annotation %q: %q, got %q: %w", annotations.KubernetesSandboxID, p.id, - sid) + sid, + errdefs.ErrFailedPrecondition) } st, err := newHcsTask(ctx, p.events, p.host, false, req, s) @@ -365,7 +365,7 @@ func (p *pod) GetTask(tid string) (shimTask, error) { } raw, loaded := p.workloadTasks.Load(tid) if !loaded { - return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid) + return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound) } return raw.(shimTask), nil } @@ -395,7 +395,7 @@ func (p *pod) KillTask(ctx context.Context, tid, eid string, signal uint32, all return err } if all && eid != "" { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all with non empty ExecID: '%s'", eid) + return fmt.Errorf("cannot signal all with non empty ExecID: %q: %w", eid, errdefs.ErrFailedPrecondition) } eg := errgroup.Group{} if all && tid == p.id { @@ -426,15 +426,15 @@ func (p *pod) DeleteTask(ctx context.Context, tid string) error { t, err := p.GetTask(tid) if err != nil { - return errors.Wrap(err, "could not find task to delete") + return fmt.Errorf("could not find task to delete: %w", err) } e, err := t.GetExec("") if err != nil { - return errors.Wrap(err, "could not get initial exec") + return fmt.Errorf("could not get initial exec: %w", err) } if e.State() == shimExecStateRunning { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot delete task with running exec") + return fmt.Errorf("cannot delete task with running exec: %w", errdefs.ErrFailedPrecondition) } if p.id != tid { diff --git a/cmd/containerd-shim-runhcs-v1/serve.go b/cmd/containerd-shim-runhcs-v1/serve.go index 290f986c42..d76039f68a 100644 --- a/cmd/containerd-shim-runhcs-v1/serve.go +++ b/cmd/containerd-shim-runhcs-v1/serve.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "fmt" "io" "net" @@ -16,7 +17,6 @@ import ( task "github.com/containerd/containerd/api/runtime/task/v2" "github.com/containerd/ttrpc" typeurl "github.com/containerd/typeurl/v2" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" "golang.org/x/sys/windows" @@ -79,7 +79,7 @@ var serveCommand = cli.Command{ // containerd passes the shim options protobuf via stdin. 
newShimOpts, err := readOptions(os.Stdin) if err != nil { - return errors.Wrap(err, "failed to read shim options from stdin") + return fmt.Errorf("failed to read shim options from stdin: %w", err) } else if newShimOpts != nil { // We received a valid shim options struct. shimOpts = newShimOpts @@ -100,7 +100,7 @@ var serveCommand = cli.Command{ if shimOpts.LogLevel != "" { lvl, err := logrus.ParseLevel(shimOpts.LogLevel) if err != nil { - return errors.Wrapf(err, "failed to parse shim log level %q", shimOpts.LogLevel) + return fmt.Errorf("failed to parse shim log level %q: %w", shimOpts.LogLevel, err) } logrus.SetLevel(lvl) } @@ -274,16 +274,16 @@ func trapClosedConnErr(err error) error { func readOptions(r io.Reader) (*runhcsopts.Options, error) { d, err := io.ReadAll(r) if err != nil { - return nil, errors.Wrap(err, "failed to read input") + return nil, fmt.Errorf("failed to read input: %w", err) } if len(d) > 0 { var a anypb.Any if err := proto.Unmarshal(d, &a); err != nil { - return nil, errors.Wrap(err, "failed unmarshalling into Any") + return nil, fmt.Errorf("failed unmarshalling into Any: %w", err) } v, err := typeurl.UnmarshalAny(&a) if err != nil { - return nil, errors.Wrap(err, "failed unmarshalling by typeurl") + return nil, fmt.Errorf("failed unmarshalling by typeurl: %w", err) } return v.(*runhcsopts.Options), nil } @@ -296,7 +296,7 @@ func createEvent(event string) (windows.Handle, error) { ev, _ := windows.UTF16PtrFromString(event) sd, err := windows.SecurityDescriptorFromString("D:P(A;;GA;;;BA)(A;;GA;;;SY)") if err != nil { - return 0, errors.Wrapf(err, "failed to get security descriptor for event '%s'", event) + return 0, fmt.Errorf("failed to get security descriptor for event %q: %w", event, err) } var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) @@ -304,7 +304,7 @@ func createEvent(event string) (windows.Handle, error) { sa.SecurityDescriptor = sd h, err := windows.CreateEvent(&sa, 0, 0, ev) if h == 0 || err != nil { - return 0, errors.Wrapf(err, "failed to create event '%s'", event) + return 0, fmt.Errorf("failed to create event %q: %w", event, err) } return h, nil } diff --git a/cmd/containerd-shim-runhcs-v1/service_internal.go b/cmd/containerd-shim-runhcs-v1/service_internal.go index b24e7b139a..58d5ad9871 100644 --- a/cmd/containerd-shim-runhcs-v1/service_internal.go +++ b/cmd/containerd-shim-runhcs-v1/service_internal.go @@ -5,6 +5,7 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -15,7 +16,6 @@ import ( "github.com/containerd/errdefs" typeurl "github.com/containerd/typeurl/v2" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -35,7 +35,7 @@ var empty = &emptypb.Empty{} func (s *service) getPod() (shimPod, error) { raw := s.taskOrPod.Load() if raw == nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task with id: '%s' must be created first", s.tid) + return nil, fmt.Errorf("task with id: %q must be created first: %w", s.tid, errdefs.ErrFailedPrecondition) } return raw.(shimPod), nil } @@ -47,7 +47,7 @@ func (s *service) getPod() (shimPod, error) { func (s *service) getTask(tid string) (shimTask, error) { raw := s.taskOrPod.Load() if raw == nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid) + return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound) } if s.isSandbox { p := raw.(shimPod) @@ -55,7 +55,7 
@@ func (s *service) getTask(tid string) (shimTask, error) { } // When its not a sandbox only the init task is a valid id. if s.tid != tid { - return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid) + return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound) } return raw.(shimTask), nil } @@ -96,12 +96,12 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskReques f.Close() spec = oci.UpdateSpecFromOptions(spec, shimOpts) - //expand annotations after defaults have been loaded in from options + // expand annotations after defaults have been loaded in from options err = oci.ProcessAnnotations(ctx, &spec) // since annotation expansion is used to toggle security features // raise it rather than suppress and move on if err != nil { - return nil, errors.Wrap(err, "unable to process OCI Spec annotations") + return nil, fmt.Errorf("unable to process OCI Spec annotations: %w", err) } // If sandbox isolation is set to hypervisor, make sure the HyperV option @@ -124,7 +124,7 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskReques } if req.Terminal && req.Stderr != "" { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty") + return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition) } resp := &task.CreateTaskResponse{} @@ -198,7 +198,7 @@ func (s *service) deleteInternal(ctx context.Context, req *task.DeleteRequest) ( if s.isSandbox && req.ExecID == "" { p, err := s.getPod() if err != nil { - return nil, errors.Wrapf(err, "could not get pod %q to delete task %q", s.tid, req.ID) + return nil, fmt.Errorf("could not get pod %q to delete task %q: %w", s.tid, req.ID, err) } err = p.DeleteTask(ctx, req.ID) if err != nil { @@ -227,7 +227,7 @@ func (s *service) pidsInternal(ctx context.Context, req *task.PidsRequest) (*tas for i, p := range pids { a, err := typeurl.MarshalAny(p) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal ProcessDetails for process: %s, task: %s", p.ExecID, req.ID) + return nil, fmt.Errorf("failed to marshal ProcessDetails for process: %s, task: %s: %w", p.ExecID, req.ID, err) } proc := &containerd_v1_types.ProcessInfo{ Pid: p.ProcessID, @@ -272,7 +272,7 @@ func (s *service) killInternal(ctx context.Context, req *task.KillRequest) (*emp if s.isSandbox { pod, err := s.getPod() if err != nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "%v: task with id: '%s' not found", err, req.ID) + return nil, fmt.Errorf("%v: task with id: %q: %w", err, req.ID, errdefs.ErrNotFound) } // Send it to the POD and let it cascade on its own through all tasks. 
err = pod.KillTask(ctx, req.ID, req.ExecID, req.Signal, req.All) @@ -299,11 +299,11 @@ func (s *service) execInternal(ctx context.Context, req *task.ExecProcessRequest return nil, err } if req.Terminal && req.Stderr != "" { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty") + return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition) } var spec specs.Process if err := json.Unmarshal(req.Spec.Value, &spec); err != nil { - return nil, errors.Wrap(err, "request.Spec was not oci process") + return nil, fmt.Errorf("request.Spec was not oci process: %w", err) } err = t.CreateExec(ctx, req, &spec) if err != nil { @@ -314,7 +314,7 @@ func (s *service) execInternal(ctx context.Context, req *task.ExecProcessRequest func (s *service) diagExecInHostInternal(ctx context.Context, req *shimdiag.ExecProcessRequest) (*shimdiag.ExecProcessResponse, error) { if req.Terminal && req.Stderr != "" { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty") + return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition) } t, err := s.getTask(s.tid) if err != nil { @@ -353,7 +353,7 @@ func (s *service) diagListExecs(task shimTask) ([]*shimdiag.Exec, error) { func (s *service) diagTasksInternal(ctx context.Context, req *shimdiag.TasksRequest) (_ *shimdiag.TasksResponse, err error) { raw := s.taskOrPod.Load() if raw == nil { - return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", s.tid) + return nil, fmt.Errorf("task with id: %q: %w", s.tid, errdefs.ErrNotFound) } resp := &shimdiag.TasksResponse{} @@ -432,7 +432,7 @@ func (s *service) closeIOInternal(ctx context.Context, req *task.CloseIORequest) func (s *service) updateInternal(ctx context.Context, req *task.UpdateTaskRequest) (*emptypb.Empty, error) { if req.Resources == nil { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "resources cannot be empty, updating container %s resources failed", req.ID) + return nil, fmt.Errorf("resources cannot be empty, updating container %s resources failed: %w", req.ID, errdefs.ErrInvalidArgument) } t, err := s.getTask(req.ID) if err != nil { @@ -476,7 +476,7 @@ func (s *service) statsInternal(ctx context.Context, req *task.StatsRequest) (*t } any, err := typeurl.MarshalAny(stats) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal Statistics for task: %s", req.ID) + return nil, fmt.Errorf("failed to marshal Statistics for task: %q: %w", req.ID, err) } return &task.StatsResponse{Stats: protobuf.FromAny(any)}, nil } diff --git a/cmd/containerd-shim-runhcs-v1/service_internal_test.go b/cmd/containerd-shim-runhcs-v1/service_internal_test.go index 20b12a251d..2340656d78 100644 --- a/cmd/containerd-shim-runhcs-v1/service_internal_test.go +++ b/cmd/containerd-shim-runhcs-v1/service_internal_test.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "fmt" "reflect" "testing" @@ -12,12 +13,11 @@ import ( "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" v1 "github.com/containerd/cgroups/v3/cgroup1/stats" task "github.com/containerd/containerd/api/runtime/task/v2" - "github.com/pkg/errors" ) func verifyExpectedError(t *testing.T, resp interface{}, actual, expected error) { t.Helper() - if actual == nil || errors.Cause(actual) != expected || !errors.Is(actual, expected) { //nolint:errorlint + if actual == nil || !errors.Is(actual, expected) { //nolint:errorlint t.Fatalf("expected error: %v, got: %v", 
expected, actual) } diff --git a/cmd/containerd-shim-runhcs-v1/start.go b/cmd/containerd-shim-runhcs-v1/start.go index acdee786e6..d23d44f050 100644 --- a/cmd/containerd-shim-runhcs-v1/start.go +++ b/cmd/containerd-shim-runhcs-v1/start.go @@ -17,7 +17,6 @@ import ( task "github.com/containerd/containerd/api/runtime/task/v2" "github.com/containerd/containerd/runtime/v2/shim" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -82,7 +81,7 @@ The start command can either start a new shim or return an address to an existin // Connect to the hosting shim and get the pid c, err := winio.DialPipe(address, nil) if err != nil { - return errors.Wrap(err, "failed to connect to hosting shim") + return fmt.Errorf("failed to connect to hosting shim: %w", err) } cl := ttrpc.NewClient(c, ttrpc.WithOnClose(func() { c.Close() })) t := task.NewTaskClient(cl) @@ -93,7 +92,7 @@ The start command can either start a new shim or return an address to an existin cl.Close() c.Close() if err != nil { - return errors.Wrap(err, "failed to get shim pid from hosting shim") + return fmt.Errorf("failed to get shim pid from hosting shim: %w", err) } pid = int(cr.ShimPid) } @@ -102,7 +101,7 @@ The start command can either start a new shim or return an address to an existin if address == "" { isSandbox := ct == oci.KubernetesContainerTypeSandbox if isSandbox && idFlag != sbid { - return errors.Errorf( + return fmt.Errorf( "'id' and '%s' must match for '%s=%s'", annotations.KubernetesSandboxID, annotations.KubernetesContainerType, @@ -197,7 +196,7 @@ func getSpecAnnotations(bundlePath string) (map[string]string, error) { defer f.Close() var spec specAnnotations if err := json.NewDecoder(f).Decode(&spec); err != nil { - return nil, errors.Wrap(err, "failed to deserialize valid OCI spec") + return nil, fmt.Errorf("failed to deserialize valid OCI spec: %w", err) } return spec.Annotations, nil } diff --git a/cmd/containerd-shim-runhcs-v1/task_hcs.go b/cmd/containerd-shim-runhcs-v1/task_hcs.go index d544cb0934..2f7fba29ad 100644 --- a/cmd/containerd-shim-runhcs-v1/task_hcs.go +++ b/cmd/containerd-shim-runhcs-v1/task_hcs.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -18,7 +19,6 @@ import ( "github.com/containerd/errdefs" "github.com/containerd/typeurl/v2" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.opencensus.io/trace" "google.golang.org/protobuf/types/known/timestamppb" @@ -59,11 +59,11 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat return nil, err } if ct != oci.KubernetesContainerTypeNone { - return nil, errors.Wrapf( - errdefs.ErrFailedPrecondition, - "cannot create standalone task, expected no annotation: '%s': got '%s'", + return nil, fmt.Errorf( + "cannot create standalone task, expected no annotation: %q, got %q: %w", annotations.KubernetesContainerType, - ct) + ct, + errdefs.ErrFailedPrecondition) } owner := filepath.Base(os.Args[0]) @@ -102,7 +102,7 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat parent.Close() } } else if !oci.IsWCOW(s) { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "oci spec does not contain WCOW or LCOW spec") + return nil, fmt.Errorf("oci spec does not contain WCOW or LCOW spec: %w", errdefs.ErrFailedPrecondition) } shim, err := newHcsTask(ctx, events, parent, true, req, s) @@ -186,7 +186,8 @@ func newHcsTask( parent *uvm.UtilityVM, ownsParent bool, 
req *task.CreateTaskRequest, - s *specs.Spec) (_ shimTask, err error) { + s *specs.Spec, +) (_ shimTask, err error) { log.G(ctx).WithFields(logrus.Fields{ "tid": req.ID, "ownsParent": ownsParent, @@ -354,11 +355,11 @@ func (ht *hcsTask) CreateExec(ctx context.Context, req *task.ExecProcessRequest, // If the task exists or we got a request for "" which is the init task // fail. if _, loaded := ht.execs.Load(req.ExecID); loaded || req.ExecID == "" { - return errors.Wrapf(errdefs.ErrAlreadyExists, "exec: '%s' in task: '%s' already exists", req.ExecID, ht.id) + return fmt.Errorf("exec: %q in task: %q: %w", req.ExecID, ht.id, errdefs.ErrAlreadyExists) } if ht.init.State() != shimExecStateRunning { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '' in task: '%s' must be running to create additional execs", ht.id) + return fmt.Errorf("exec: \"\" in task: %q must be running to create additional execs: %w", ht.id, errdefs.ErrFailedPrecondition) } io, err := cmd.NewUpstreamIO(ctx, req.ID, req.Stdout, req.Stderr, req.Stdin, req.Terminal, ht.ioRetryTimeout) @@ -397,7 +398,7 @@ func (ht *hcsTask) GetExec(eid string) (shimExec, error) { } raw, loaded := ht.execs.Load(eid) if !loaded { - return nil, errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", eid, ht.id) + return nil, fmt.Errorf("exec: %q in task: %q: %w", eid, ht.id, errdefs.ErrNotFound) } return raw.(shimExec), nil } @@ -425,7 +426,7 @@ func (ht *hcsTask) KillExec(ctx context.Context, eid string, signal uint32, all return err } if all && eid != "" { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all for non-empty exec: '%s'", eid) + return fmt.Errorf("cannot signal all for non-empty exec: %q: %w", eid, errdefs.ErrFailedPrecondition) } if all { // We are in a kill all on the init task. Signal everything. 
@@ -508,7 +509,7 @@ func (ht *hcsTask) DeleteExec(ctx context.Context, eid string) (int, uint32, tim select { case <-time.After(30 * time.Second): log.G(ctx).Error("timed out waiting for resource cleanup") - return 0, 0, time.Time{}, errors.Wrap(hcs.ErrTimeout, "waiting for container resource cleanup") + return 0, 0, time.Time{}, fmt.Errorf("waiting for container resource cleanup: %w", hcs.ErrTimeout) case <-ht.closed: } @@ -573,7 +574,7 @@ func (ht *hcsTask) Pids(ctx context.Context) ([]*runhcsopts.ProcessDetails, erro props, err := ht.c.Properties(ctx, schema1.PropertyTypeProcessList) if err != nil { if isStatsNotFound(err) { - return nil, errors.Wrapf(errdefs.ErrNotFound, "failed to fetch pids: %s", err) + return nil, fmt.Errorf("failed to fetch pids: %w: %w", err, errdefs.ErrNotFound) } return nil, err } @@ -827,7 +828,7 @@ func (ht *hcsTask) Stats(ctx context.Context) (*stats.Statistics, error) { props, err := ht.c.PropertiesV2(ctx, hcsschema.PTStatistics) if err != nil { if isStatsNotFound(err) { - return nil, errors.Wrapf(errdefs.ErrNotFound, "failed to fetch stats: %s", err) + return nil, fmt.Errorf("failed to fetch stats: %w: %w", err, errdefs.ErrNotFound) } return nil, err } @@ -852,7 +853,7 @@ func (ht *hcsTask) Stats(ctx context.Context) (*stats.Statistics, error) { func (ht *hcsTask) Update(ctx context.Context, req *task.UpdateTaskRequest) error { resources, err := typeurl.UnmarshalAny(req.Resources) if err != nil { - return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID) + return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err) } if err := verifyTaskUpdateResourcesType(resources); err != nil { @@ -1024,7 +1025,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd // about the isolated case. 
hostPath, err := fs.ResolvePath(resources.HostPath) if err != nil { - return errors.Wrapf(err, "failed to resolve path for hostPath %s", resources.HostPath) + return fmt.Errorf("failed to resolve path for hostPath %q: %w", resources.HostPath, err) } // process isolated windows container @@ -1034,7 +1035,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd ReadOnly: resources.ReadOnly, } if err := ht.requestAddContainerMount(ctx, resourcepaths.SiloMappedDirectoryResourcePath, settings); err != nil { - return errors.Wrapf(err, "failed to add mount to process isolated container") + return fmt.Errorf("failed to add mount to process isolated container: %w", err) } } else { // if it is a mount request for a running hyperV WCOW container, we should first mount volume to the @@ -1052,7 +1053,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd ReadOnly: resources.ReadOnly, } if err := ht.requestAddContainerMount(ctx, resourcepaths.SiloMappedDirectoryResourcePath, settings); err != nil { - return errors.Wrapf(err, "failed to add mount to hyperV container") + return fmt.Errorf("failed to add mount to hyperV container: %w", err) } } return nil diff --git a/cmd/containerd-shim-runhcs-v1/task_test.go b/cmd/containerd-shim-runhcs-v1/task_test.go index 2ae9f0a1c2..9cb6e526b7 100644 --- a/cmd/containerd-shim-runhcs-v1/task_test.go +++ b/cmd/containerd-shim-runhcs-v1/task_test.go @@ -4,6 +4,8 @@ package main import ( "context" + "errors" + "fmt" "time" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" @@ -15,7 +17,6 @@ import ( "github.com/containerd/errdefs" typeurl "github.com/containerd/typeurl/v2" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) var _ = (shimTask)(&testShimTask{}) @@ -106,7 +107,7 @@ func (tst *testShimTask) DumpGuestStacks(ctx context.Context) string { func (tst *testShimTask) Update(ctx context.Context, req *task.UpdateTaskRequest) error { data, err := typeurl.UnmarshalAny(req.Resources) if err != nil { - return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID) + return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err) } if err := verifyTaskUpdateResourcesType(data); err != nil { return err diff --git a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go index 423c5c23de..7edf19615d 100644 --- a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go +++ b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go @@ -4,6 +4,7 @@ package main import ( "context" + "fmt" "sync" "time" @@ -20,7 +21,6 @@ import ( "github.com/containerd/errdefs" typeurl "github.com/containerd/typeurl/v2" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "go.opencensus.io/trace" ) @@ -99,7 +99,7 @@ func (wpst *wcowPodSandboxTask) ID() string { } func (wpst *wcowPodSandboxTask) CreateExec(ctx context.Context, req *task.ExecProcessRequest, s *specs.Process) error { - return errors.Wrap(errdefs.ErrNotImplemented, "WCOW Pod task should never issue exec") + return fmt.Errorf("WCOW Pod task should never issue exec: %w", errdefs.ErrNotImplemented) } func (wpst *wcowPodSandboxTask) GetExec(eid string) (shimExec, error) { @@ -107,7 +107,7 @@ func (wpst *wcowPodSandboxTask) GetExec(eid string) (shimExec, error) { return wpst.init, nil } // Cannot exec in an a WCOW sandbox container so all non-init calls fail here. 
- return nil, errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", eid, wpst.id) + return nil, fmt.Errorf("exec: %q in task: %q: %w", eid, wpst.id, errdefs.ErrNotFound) } func (wpst *wcowPodSandboxTask) ListExecs() ([]shimExec, error) { @@ -120,7 +120,7 @@ func (wpst *wcowPodSandboxTask) KillExec(ctx context.Context, eid string, signal return err } if all && eid != "" { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all for non-empty exec: '%s'", eid) + return fmt.Errorf("cannot signal all for non-empty exec: %q: %w", eid, errdefs.ErrFailedPrecondition) } err = e.Kill(ctx, signal) if err != nil { @@ -275,7 +275,7 @@ func (wpst *wcowPodSandboxTask) Update(ctx context.Context, req *task.UpdateTask resources, err := typeurl.UnmarshalAny(req.Resources) if err != nil { - return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID) + return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err) } if err := verifyTaskUpdateResourcesType(resources); err != nil { diff --git a/cmd/gcs/main.go b/cmd/gcs/main.go index 25751763dd..332e839c0e 100644 --- a/cmd/gcs/main.go +++ b/cmd/gcs/main.go @@ -17,7 +17,6 @@ import ( cgroups "github.com/containerd/cgroups/v3/cgroup1" cgroupstats "github.com/containerd/cgroups/v3/cgroup1/stats" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -121,12 +120,12 @@ func runWithRestartMonitor(arg0 string, args ...string) { func startTimeSyncService() error { ptpClassDir, err := os.Open("/sys/class/ptp") if err != nil { - return errors.Wrap(err, "failed to open PTP class directory") + return fmt.Errorf("failed to open PTP class directory: %w", err) } ptpDirList, err := ptpClassDir.Readdirnames(-1) if err != nil { - return errors.Wrap(err, "failed to list PTP class directory") + return fmt.Errorf("failed to list PTP class directory: %w", err) } var ptpDirPath string @@ -137,7 +136,7 @@ func startTimeSyncService() error { clockNameFilePath := filepath.Join(ptpClassDir.Name(), ptpDirPath, "clock_name") buf, err := os.ReadFile(clockNameFilePath) if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "failed to read clock name file at %s", clockNameFilePath) + return fmt.Errorf("failed to read clock name: %w", err) } if string(buf) == expectedClockName { @@ -147,7 +146,7 @@ func startTimeSyncService() error { } if !found { - return errors.Errorf("no PTP device found with name \"%s\"", expectedClockName) + return fmt.Errorf("no PTP device found with name %q", expectedClockName) } // create chronyd config file @@ -155,9 +154,9 @@ func startTimeSyncService() error { // chronyd config file take from: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/time-sync chronydConfigString := fmt.Sprintf("refclock PHC %s poll 3 dpoll -2 offset 0 stratum 2\nmakestep 0.1 -1\n", ptpDevPath) chronydConfPath := "/tmp/chronyd.conf" - err = os.WriteFile(chronydConfPath, []byte(chronydConfigString), 0644) + err = os.WriteFile(chronydConfPath, []byte(chronydConfigString), 0o644) if err != nil { - return errors.Wrapf(err, "failed to create chronyd conf file %s", chronydConfPath) + return fmt.Errorf("failed to create chronyd conf file: %w", err) } // start chronyd. 
Do NOT start chronyd as daemon because creating a daemon @@ -220,7 +219,7 @@ func main() { var logWriter *os.File if *logFile != "" { - logFileHandle, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + logFileHandle, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) if err != nil { logrus.WithFields(logrus.Fields{ "path": *logFile, @@ -285,7 +284,7 @@ func main() { if err := os.WriteFile( "/proc/sys/kernel/core_pattern", []byte(*coreDumpLoc), - 0644, + 0o644, ); err != nil { logrus.WithError(err).Fatal("failed to set core dump location") } @@ -333,7 +332,7 @@ func main() { // Write 1 to memory.use_hierarchy on the root cgroup to enable hierarchy // support. This needs to be set before we create any cgroups as the write // will fail otherwise. - if err := os.WriteFile("/sys/fs/cgroup/memory/memory.use_hierarchy", []byte("1"), 0644); err != nil { + if err := os.WriteFile("/sys/fs/cgroup/memory/memory.use_hierarchy", []byte("1"), 0o644); err != nil { logrus.WithError(err).Fatal("failed to enable hierarchy support for root cgroup") } diff --git a/cmd/gcstools/installdrivers.go b/cmd/gcstools/installdrivers.go index e382765827..22dc20b3b4 100644 --- a/cmd/gcstools/installdrivers.go +++ b/cmd/gcstools/installdrivers.go @@ -5,6 +5,7 @@ package main import ( "context" + "errors" "fmt" "io/fs" "os" @@ -14,7 +15,6 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/storage/overlay" "github.com/Microsoft/hcsshim/internal/log" - "github.com/pkg/errors" ) const moduleExtension = ".ko" @@ -51,7 +51,7 @@ func install(ctx context.Context) error { modules := []string{} if walkErr := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error { if err != nil { - return errors.Wrap(err, "failed to read directory while walking dir") + return fmt.Errorf("failed to read directory while walking dir: %w", err) } if !info.IsDir() && filepath.Ext(info.Name()) == moduleExtension { moduleName := strings.TrimSuffix(info.Name(), moduleExtension) @@ -67,7 +67,7 @@ func install(ctx context.Context) error { cmd := exec.Command("depmod", depmodArgs...) 
out, err := cmd.CombinedOutput() if err != nil { - return errors.Wrapf(err, "failed to run depmod with args %v: %s", depmodArgs, out) + return fmt.Errorf("failed to run depmod with args %v: %w (output: %s)", depmodArgs, err, out) } // run modprobe for every module name found @@ -79,7 +79,7 @@ func install(ctx context.Context) error { out, err = cmd.CombinedOutput() if err != nil { - return errors.Wrapf(err, "failed to run modprobe with args %v: %s", modprobeArgs, out) + return fmt.Errorf("failed to run modprobe with args %v: %w (output: %s)", modprobeArgs, err, out) } return nil diff --git a/cmd/ncproxy/computeagent_cache.go b/cmd/ncproxy/computeagent_cache.go index bc2200783f..0555b95daf 100644 --- a/cmd/ncproxy/computeagent_cache.go +++ b/cmd/ncproxy/computeagent_cache.go @@ -3,9 +3,8 @@ package main import ( + "errors" "sync" - - "github.com/pkg/errors" ) var errNilCache = errors.New("cannot access a nil cache") diff --git a/cmd/ncproxy/config.go b/cmd/ncproxy/config.go index cce5d917e1..352e1520b2 100644 --- a/cmd/ncproxy/config.go +++ b/cmd/ncproxy/config.go @@ -4,11 +4,11 @@ package main import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -30,20 +30,20 @@ var configCommand = cli.Command{ configData, err := json.MarshalIndent(defaultConfig(), "", " ") if err != nil { - return errors.Wrap(err, "failed to marshal ncproxy config to json") + return fmt.Errorf("failed to marshal ncproxy config to json: %w", err) } if file != "" { // Make the directory if it doesn't exist. if _, err := os.Stat(filepath.Dir(file)); err != nil { - if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil { - return errors.Wrap(err, "failed to make path to config file") + if err := os.MkdirAll(filepath.Dir(file), 0o700); err != nil { + return fmt.Errorf("failed to make path to config file: %w", err) } } if err := os.WriteFile( file, []byte(configData), - 0700, + 0o700, ); err != nil { return err } @@ -96,7 +96,7 @@ func loadConfig(path string) (*config, error) { func readConfig(path string) (*config, error) { data, err := os.ReadFile(path) if err != nil { - return nil, errors.Wrap(err, "failed to read config file") + return nil, fmt.Errorf("failed to read config file: %w", err) } conf := &config{} if err := json.Unmarshal(data, conf); err != nil { diff --git a/cmd/ncproxy/hcn.go b/cmd/ncproxy/hcn.go index 75139c569d..12853654b3 100644 --- a/cmd/ncproxy/hcn.go +++ b/cmd/ncproxy/hcn.go @@ -5,6 +5,7 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "net" "strings" @@ -12,7 +13,6 @@ import ( "github.com/Microsoft/hcsshim/hcn" "github.com/Microsoft/hcsshim/internal/log" ncproxygrpc "github.com/Microsoft/hcsshim/pkg/ncproxy/ncproxygrpc/v1" - "github.com/pkg/errors" ) func hcnEndpointToEndpointResponse(ep *hcn.HostComputeEndpoint) (_ *ncproxygrpc.GetEndpointResponse, err error) { @@ -36,12 +36,12 @@ func hcnEndpointToEndpointResponse(ep *hcn.HostComputeEndpoint) (_ *ncproxygrpc. 
ipConfigInfos := ep.IpConfigurations // there may be one ipv4 and/or one ipv6 configuration for an endpoint if len(ipConfigInfos) == 0 || len(ipConfigInfos) > 2 { - return nil, errors.Errorf("invalid number (%v) of ip configuration information for endpoint %v", len(ipConfigInfos), ep.Name) + return nil, fmt.Errorf("invalid number (%v) of ip configuration information for endpoint %v", len(ipConfigInfos), ep.Name) } for _, ipConfig := range ipConfigInfos { ip := net.ParseIP(ipConfig.IpAddress) if ip == nil { - return nil, errors.Errorf("failed to parse IP address %v", ipConfig.IpAddress) + return nil, fmt.Errorf("failed to parse IP address %v", ipConfig.IpAddress) } if ip.To4() != nil { // this is an IPv4 address @@ -121,7 +121,7 @@ func constructEndpointPolicies(req *ncproxygrpc.HcnEndpointPolicies) ([]hcn.Endp } iovJSON, err := json.Marshal(iovSettings) if err != nil { - return []hcn.EndpointPolicy{}, errors.Wrap(err, "failed to marshal IovPolicySettings") + return []hcn.EndpointPolicy{}, fmt.Errorf("failed to marshal IovPolicySettings: %w", err) } policy := hcn.EndpointPolicy{ Type: hcn.IOV, @@ -136,7 +136,7 @@ func constructEndpointPolicies(req *ncproxygrpc.HcnEndpointPolicies) ([]hcn.Endp } portPolicyJSON, err := json.Marshal(portPolicy) if err != nil { - return []hcn.EndpointPolicy{}, errors.Wrap(err, "failed to marshal portname") + return []hcn.EndpointPolicy{}, fmt.Errorf("failed to marshal portname: %w", err) } policy := hcn.EndpointPolicy{ Type: hcn.PortName, @@ -152,7 +152,7 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe // Check if the network already exists, and if so return error. _, err := hcn.GetNetworkByName(req.Name) if err == nil { - return nil, errors.Errorf("network with name %q already exists", req.Name) + return nil, fmt.Errorf("network with name %q already exists", req.Name) } policies := []hcn.NetworkPolicy{} @@ -163,20 +163,20 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe extSwitch, err := hcn.GetNetworkByName(req.SwitchName) if err != nil { if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint - return nil, errors.Errorf("no network/switch with name `%s` found", req.SwitchName) + return nil, fmt.Errorf("no network/switch with name %q found", req.SwitchName) } - return nil, errors.Wrapf(err, "failed to get network/switch with name %q", req.SwitchName) + return nil, fmt.Errorf("failed to get network/switch with name %q: %w", req.SwitchName, err) } // Get layer ID and use this as the basis for what to layer the new network over. 
if extSwitch.Health.Extra.LayeredOn == "" { - return nil, errors.Errorf("no layer ID found for network %q found", extSwitch.Id) + return nil, fmt.Errorf("no layer ID found for network %q found", extSwitch.Id) } layerPolicy := hcn.LayerConstraintNetworkPolicySetting{LayerId: extSwitch.Health.Extra.LayeredOn} data, err := json.Marshal(layerPolicy) if err != nil { - return nil, errors.Wrap(err, "failed to marshal layer policy") + return nil, fmt.Errorf("failed to marshal layer policy: %w", err) } netPolicy := hcn.NetworkPolicy{ @@ -238,7 +238,7 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe network, err = network.Create() if err != nil { - return nil, errors.Wrapf(err, "failed to create HNS network %q", req.Name) + return nil, fmt.Errorf("failed to create HNS network %q: %w", req.Name, err) } return network, nil @@ -355,7 +355,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req if req.Policies != nil { policies, err = constructEndpointPolicies(req.Policies) if err != nil { - return nil, errors.Wrap(err, "failed to construct endpoint policies") + return nil, fmt.Errorf("failed to construct endpoint policies: %w", err) } } @@ -380,7 +380,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req } endpoint, err = endpoint.Create() if err != nil { - return nil, errors.Wrap(err, "failed to create HNS endpoint") + return nil, fmt.Errorf("failed to create HNS endpoint: %w", err) } return endpoint, nil @@ -391,7 +391,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req func getHostDefaultNamespace() (string, error) { namespaces, err := hcn.ListNamespaces() if err != nil { - return "", errors.Wrapf(err, "failed list namespaces") + return "", fmt.Errorf("failed to list namespaces: %w", err) } for _, ns := range namespaces { diff --git a/cmd/ncproxy/ncproxy.go b/cmd/ncproxy/ncproxy.go index af2f18a860..c27c328b8d 100644 --- a/cmd/ncproxy/ncproxy.go +++ b/cmd/ncproxy/ncproxy.go @@ -5,13 +5,15 @@ package main import ( "context" "encoding/json" + "errors" + "fmt" "time" "github.com/Microsoft/go-winio" "github.com/containerd/containerd/protobuf" "github.com/containerd/ttrpc" typeurl "github.com/containerd/typeurl/v2" - "github.com/pkg/errors" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -113,7 +115,7 @@ func (s *grpcService) AddNIC(ctx context.Context, req *ncproxygrpc.AddNICRequest if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName) } - return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err) } anyEndpoint, err = typeurl.MarshalAny(ep) @@ -143,7 +145,7 @@ func (s *grpcService) AddNIC(ctx context.Context, req *ncproxygrpc.AddNICRequest } policies := []hcn.EndpointPolicy{iovPolicy} if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil { - return nil, errors.Wrap(err, "failed to add policy to endpoint") + return nil, fmt.Errorf("failed to add policy to endpoint: %w", err) } } } @@ -183,7 +185,7 @@ func (s *grpcService) ModifyNIC(ctx context.Context, req *ncproxygrpc.ModifyNICR if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName) } - return nil, errors.Wrapf(err, 
"failed to get endpoint with name `%s`", req.EndpointName) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err) } anyEndpoint, err := typeurl.MarshalAny(ep) @@ -237,14 +239,14 @@ func (s *grpcService) ModifyNIC(ctx context.Context, req *ncproxygrpc.ModifyNICR return nil, err } if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil { - return nil, errors.Wrap(err, "failed to modify network adapter") + return nil, fmt.Errorf("failed to modify network adapter: %w", err) } if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeRemove); err != nil { - return nil, errors.Wrap(err, "failed to modify network adapter") + return nil, fmt.Errorf("failed to modify network adapter: %w", err) } } else { if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil { - return nil, errors.Wrap(err, "failed to modify network adapter") + return nil, fmt.Errorf("failed to modify network adapter: %w", err) } if _, err := agent.ModifyNIC(ctx, caReq); err != nil { return nil, err @@ -284,7 +286,7 @@ func (s *grpcService) DeleteNIC(ctx context.Context, req *ncproxygrpc.DeleteNICR if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName) } - return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err) } anyEndpoint, err = typeurl.MarshalAny(ep) if err != nil { @@ -387,7 +389,7 @@ func (s *grpcService) CreateEndpoint(ctx context.Context, req *ncproxygrpc.Creat if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", reqEndpoint.NetworkName) } - return nil, errors.Wrapf(err, "failed to get network with name %q", reqEndpoint.NetworkName) + return nil, fmt.Errorf("failed to get network with name %q: %w", reqEndpoint.NetworkName, err) } ep, err := createHCNEndpoint(ctx, network, reqEndpoint) if err != nil { @@ -403,9 +405,9 @@ func (s *grpcService) CreateEndpoint(ctx context.Context, req *ncproxygrpc.Creat return nil, status.Errorf(codes.InvalidArgument, "received empty field in request: %+v", req) } - network, err := s.ncpNetworkingStore.GetNetworkByName(ctx, reqEndpoint.NetworkName) - if err != nil || network == nil { - return nil, errors.Wrapf(err, "network %v does not exist", reqEndpoint.NetworkName) + _, err := s.ncpNetworkingStore.GetNetworkByName(ctx, reqEndpoint.NetworkName) + if err != nil { + return nil, fmt.Errorf("network %v does not exist: %w", reqEndpoint.NetworkName, err) } epSettings := &ncproxynetworking.EndpointSettings{ Name: reqEndpoint.Name, @@ -452,7 +454,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo if endpt, err := s.ncpNetworkingStore.GetEndpointByName(ctx, req.Name); err == nil { endpt.NamespaceID = req.NamespaceID if err := s.ncpNetworkingStore.UpdateEndpoint(ctx, endpt); err != nil { - return nil, errors.Wrapf(err, "failed to update endpoint with name `%s`", req.Name) + return nil, fmt.Errorf("failed to update endpoint with name %q: %w", req.Name, err) } } else { if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) { @@ -464,7 +466,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, 
status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name) } - return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.Name) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, err) } if req.AttachToHost { if req.NamespaceID != "" { @@ -483,7 +485,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo span.AddAttributes(trace.StringAttribute("namespaceID", req.NamespaceID)) } if err := hcn.AddNamespaceEndpoint(req.NamespaceID, ep.Id); err != nil { - return nil, errors.Wrapf(err, "failed to add endpoint with name %q to namespace", req.Name) + return nil, fmt.Errorf("failed to add endpoint with name %q to namespace: %w", req.Name, err) } } @@ -504,7 +506,7 @@ func (s *grpcService) DeleteEndpoint(ctx context.Context, req *ncproxygrpc.Delet if _, err := s.ncpNetworkingStore.GetEndpointByName(ctx, req.Name); err == nil { if err := s.ncpNetworkingStore.DeleteEndpoint(ctx, req.Name); err != nil { - return nil, errors.Wrapf(err, "failed to delete endpoint with name %q", req.Name) + return nil, fmt.Errorf("failed to delete endpoint with name %q: %w", req.Name, err) } } else { if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) { @@ -516,11 +518,11 @@ func (s *grpcService) DeleteEndpoint(ctx context.Context, req *ncproxygrpc.Delet if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name) } - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", req.Name) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, err) } if err = ep.Delete(); err != nil { - return nil, errors.Wrapf(err, "failed to delete endpoint with name %q", req.Name) + return nil, fmt.Errorf("failed to delete endpoint with name %q: %w", req.Name, err) } } return &ncproxygrpc.DeleteEndpointResponse{}, nil @@ -540,7 +542,7 @@ func (s *grpcService) DeleteNetwork(ctx context.Context, req *ncproxygrpc.Delete if _, err := s.ncpNetworkingStore.GetNetworkByName(ctx, req.Name); err == nil { if err := s.ncpNetworkingStore.DeleteNetwork(ctx, req.Name); err != nil { - return nil, errors.Wrapf(err, "failed to delete network with name %q", req.Name) + return nil, fmt.Errorf("failed to delete network with name %q: %w", req.Name, err) } } else { if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) { @@ -551,11 +553,11 @@ func (s *grpcService) DeleteNetwork(ctx context.Context, req *ncproxygrpc.Delete if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", req.Name) } - return nil, errors.Wrapf(err, "failed to get network with name %q", req.Name) + return nil, fmt.Errorf("failed to get network with name %q: %w", req.Name, err) } if err = network.Delete(); err != nil { - return nil, errors.Wrapf(err, "failed to delete network with name %q", req.Name) + return nil, fmt.Errorf("failed to delete network with name %q: %w", req.Name, err) } } @@ -618,7 +620,7 @@ func (s *grpcService) GetEndpoint(ctx context.Context, req *ncproxygrpc.GetEndpo if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name) } - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", req.Name) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, 
err) } return hcnEndpointToEndpointResponse(ep) } @@ -632,12 +634,12 @@ func (s *grpcService) GetEndpoints(ctx context.Context, req *ncproxygrpc.GetEndp rawHCNEndpoints, err := hcn.ListEndpoints() if err != nil { - return nil, errors.Wrap(err, "failed to get HNS endpoints") + return nil, fmt.Errorf("failed to get HNS endpoints: %w", err) } rawNCProxyEndpoints, err := s.ncpNetworkingStore.ListEndpoints(ctx) if err != nil && !errors.Is(err, ncproxystore.ErrBucketNotFound) { - return nil, errors.Wrap(err, "failed to get ncproxy networking endpoints") + return nil, fmt.Errorf("failed to get ncproxy networking endpoints: %w", err) } for _, endpoint := range rawHCNEndpoints { @@ -697,7 +699,7 @@ func (s *grpcService) GetNetwork(ctx context.Context, req *ncproxygrpc.GetNetwor if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", req.Name) } - return nil, errors.Wrapf(err, "failed to get network with name %q", req.Name) + return nil, fmt.Errorf("failed to get network with name %q: %w", req.Name, err) } return hcnNetworkToNetworkResponse(ctx, network) @@ -712,12 +714,12 @@ func (s *grpcService) GetNetworks(ctx context.Context, req *ncproxygrpc.GetNetwo rawHCNNetworks, err := hcn.ListNetworks() if err != nil { - return nil, errors.Wrap(err, "failed to get HNS networks") + return nil, fmt.Errorf("failed to get HNS networks: %w", err) } rawNCProxyNetworks, err := s.ncpNetworkingStore.ListNetworks(ctx) if err != nil && !errors.Is(err, ncproxystore.ErrBucketNotFound) { - return nil, errors.Wrap(err, "failed to get ncproxy networking networks") + return nil, fmt.Errorf("failed to get ncproxy networking networks: %w", err) } for _, network := range rawHCNNetworks { @@ -763,7 +765,7 @@ func newTTRPCService(ctx context.Context, agent *computeAgentCache, agentStore * func getComputeAgentClient(agentAddr string) (*computeAgentClient, error) { conn, err := winioDialPipe(agentAddr, nil) if err != nil { - return nil, errors.Wrap(err, "failed to connect to compute agent service") + return nil, fmt.Errorf("failed to connect to compute agent service: %w", err) } raw := ttrpcNewClient( conn, diff --git a/cmd/ncproxy/run.go b/cmd/ncproxy/run.go index a4509052ce..adb95746e3 100644 --- a/cmd/ncproxy/run.go +++ b/cmd/ncproxy/run.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "fmt" "io" "os" @@ -16,7 +17,7 @@ import ( "github.com/Microsoft/go-winio/pkg/etwlogrus" "github.com/Microsoft/go-winio/pkg/guid" "github.com/containerd/ttrpc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/urfave/cli" "go.opencensus.io/plugin/ocgrpc" @@ -173,7 +174,7 @@ func run(clicontext *cli.Context) error { // If a log dir was provided, make sure it exists. 
if _, err := os.Stat(logDir); err != nil { if err := os.MkdirAll(logDir, 0); err != nil { - return errors.Wrap(err, "failed to make log directory") + return fmt.Errorf("failed to make log directory: %w", err) } } } @@ -208,7 +209,7 @@ func run(clicontext *cli.Context) error { ctx := context.Background() conf, err := loadConfig(configPath) if err != nil { - return errors.Wrap(err, "failed getting configuration file") + return fmt.Errorf("failed getting configuration file: %w", err) } if conf.GRPCAddr == "" { @@ -269,7 +270,7 @@ func run(clicontext *cli.Context) error { dir := filepath.Dir(dbPath) if _, err := os.Stat(dir); err != nil { if err := os.MkdirAll(dir, 0); err != nil { - return errors.Wrap(err, "failed to make database directory") + return fmt.Errorf("failed to make database directory: %w", err) } } } @@ -306,7 +307,7 @@ func run(clicontext *cli.Context) error { log.G(ctx).Info("Received interrupt. Closing") case err := <-serveErr: if err != nil { - return errors.Wrap(err, "server failure") + return fmt.Errorf("server failure: %w", err) } case <-serviceDone: log.G(ctx).Info("Windows service stopped or shutdown") diff --git a/cmd/ncproxy/server.go b/cmd/ncproxy/server.go index ac808b5fcf..3bbe8333ed 100644 --- a/cmd/ncproxy/server.go +++ b/cmd/ncproxy/server.go @@ -4,6 +4,8 @@ package main import ( "context" + "errors" + "fmt" "net" "strings" "sync" @@ -16,7 +18,7 @@ import ( ncproxygrpc "github.com/Microsoft/hcsshim/pkg/ncproxy/ncproxygrpc/v1" "github.com/Microsoft/hcsshim/pkg/octtrpc" "github.com/containerd/ttrpc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" "go.opencensus.io/plugin/ocgrpc" @@ -205,7 +207,7 @@ func reconnectComputeAgents(ctx context.Context, agentStore *ncproxystore.Comput func disconnectComputeAgents(ctx context.Context, containerIDToComputeAgent *computeAgentCache) error { agents, err := containerIDToComputeAgent.getAllAndClear() if err != nil { - return errors.Wrapf(err, "failed to get all cached compute agent clients") + return fmt.Errorf("failed to get all cached compute agent clients: %w", err) } for _, agent := range agents { if err := agent.Close(); err != nil { diff --git a/cmd/ncproxy/server_test.go b/cmd/ncproxy/server_test.go index fab2bbe136..aeaada0fc9 100644 --- a/cmd/ncproxy/server_test.go +++ b/cmd/ncproxy/server_test.go @@ -4,6 +4,7 @@ package main import ( "context" + "errors" "net" "path/filepath" "testing" @@ -17,7 +18,7 @@ import ( nodenetsvc "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v1" nodenetsvcMock "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v1/mock" "github.com/containerd/ttrpc" - "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" "go.uber.org/mock/gomock" "google.golang.org/grpc/codes" diff --git a/cmd/runhcs/create-scratch.go b/cmd/runhcs/create-scratch.go index 34a7500a5e..1e36a1a253 100644 --- a/cmd/runhcs/create-scratch.go +++ b/cmd/runhcs/create-scratch.go @@ -4,13 +4,14 @@ package main import ( gcontext "context" + "errors" + "fmt" "github.com/Microsoft/hcsshim/internal/appargs" "github.com/Microsoft/hcsshim/internal/lcow" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -74,14 +75,14 @@ var createScratchCommand = cli.Command{ convertUVM, err := uvm.CreateLCOW(ctx, opts) if err != nil { - return errors.Wrapf(err, "failed to create '%s'", opts.ID) + return fmt.Errorf("failed to create %q: %w", opts.ID, err) } defer convertUVM.Close() if err := 
convertUVM.Start(ctx); err != nil { - return errors.Wrapf(err, "failed to start '%s'", opts.ID) + return fmt.Errorf("failed to start %q: %w", opts.ID, err) } if err := lcow.CreateScratch(ctx, convertUVM, dest, sizeGB, context.String("cache-path")); err != nil { - return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", opts.ID) + return fmt.Errorf("failed to create ext4vhdx for %q: %w", opts.ID, err) } return nil diff --git a/cmd/runhcs/prepare-disk.go b/cmd/runhcs/prepare-disk.go index ba9e0d370b..a87746d291 100644 --- a/cmd/runhcs/prepare-disk.go +++ b/cmd/runhcs/prepare-disk.go @@ -4,13 +4,14 @@ package main import ( gcontext "context" + "errors" + "fmt" "github.com/Microsoft/hcsshim/internal/appargs" "github.com/Microsoft/hcsshim/internal/lcow" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -51,14 +52,14 @@ var prepareDiskCommand = cli.Command{ preparediskUVM, err := uvm.CreateLCOW(ctx, opts) if err != nil { - return errors.Wrapf(err, "failed to create '%s'", opts.ID) + return fmt.Errorf("failed to create %q: %w", opts.ID, err) } defer preparediskUVM.Close() if err := preparediskUVM.Start(ctx); err != nil { - return errors.Wrapf(err, "failed to start '%s'", opts.ID) + return fmt.Errorf("failed to start %q: %w", opts.ID, err) } if err := lcow.FormatDisk(ctx, preparediskUVM, dest); err != nil { - return errors.Wrapf(err, "failed to format disk '%s' with ext4", opts.ID) + return fmt.Errorf("failed to format disk %q with ext4: %w", opts.ID, err) } return nil diff --git a/cmd/runhcs/vm.go b/cmd/runhcs/vm.go index 0b1374bfcf..a2507597b2 100644 --- a/cmd/runhcs/vm.go +++ b/cmd/runhcs/vm.go @@ -5,6 +5,7 @@ package main import ( gcontext "context" "encoding/json" + "errors" "fmt" "io" "net" @@ -16,7 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/runhcs" "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/urfave/cli" ) diff --git a/cmd/wclayer/volumemountutils.go b/cmd/wclayer/volumemountutils.go index b9a02e8477..bcf3dbd350 100644 --- a/cmd/wclayer/volumemountutils.go +++ b/cmd/wclayer/volumemountutils.go @@ -5,10 +5,10 @@ package main // Simple wrappers around SetVolumeMountPoint and DeleteVolumeMountPoint import ( + "fmt" "path/filepath" "strings" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -16,7 +16,7 @@ import ( // https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setvolumemountpointw func setVolumeMountPoint(targetPath string, volumePath string) error { if !strings.HasPrefix(volumePath, "\\\\?\\Volume{") { - return errors.Errorf("unable to mount non-volume path %s", volumePath) + return fmt.Errorf("unable to mount non-volume path %s", volumePath) } // Both must end in a backslash @@ -25,16 +25,16 @@ func setVolumeMountPoint(targetPath string, volumePath string) error { targetP, err := windows.UTF16PtrFromString(slashedTarget) if err != nil { - return errors.Wrapf(err, "unable to utf16-ise %s", slashedTarget) + return fmt.Errorf("unable to utf16-ise %s: %w", slashedTarget, err) } volumeP, err := windows.UTF16PtrFromString(slashedVolume) if err != nil { - return errors.Wrapf(err, "unable to utf16-ise %s", slashedVolume) + return fmt.Errorf("unable to utf16-ise %s: %w", slashedVolume, err) } if err := windows.SetVolumeMountPoint(targetP, volumeP); err != nil { - return errors.Wrapf(err, "failed 
calling SetVolumeMount('%s', '%s')", slashedTarget, slashedVolume) + return fmt.Errorf("failed calling SetVolumeMount(%q, %q): %w", slashedTarget, slashedVolume, err) } return nil @@ -48,11 +48,11 @@ func deleteVolumeMountPoint(targetPath string) error { targetP, err := windows.UTF16PtrFromString(slashedTarget) if err != nil { - return errors.Wrapf(err, "unable to utf16-ise %s", slashedTarget) + return fmt.Errorf("unable to utf16-ise %s: %w", slashedTarget, err) } if err := windows.DeleteVolumeMountPoint(targetP); err != nil { - return errors.Wrapf(err, "failed calling DeleteVolumeMountPoint('%s')", slashedTarget) + return fmt.Errorf("failed calling DeleteVolumeMountPoint(%q): %w", slashedTarget, err) } return nil diff --git a/computestorage/attach.go b/computestorage/attach.go index 301a10888f..2365f37da6 100644 --- a/computestorage/attach.go +++ b/computestorage/attach.go @@ -5,9 +5,10 @@ package computestorage import ( "context" "encoding/json" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -34,7 +35,7 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to attach layer storage filter") + return fmt.Errorf("failed to attach layer storage filter: %w", err) } return nil } @@ -62,7 +63,7 @@ func AttachOverlayFilter(ctx context.Context, volumePath string, layerData Layer err = hcsAttachOverlayFilter(volumePath, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to attach overlay filter") + return fmt.Errorf("failed to attach overlay filter: %w", err) } return nil } diff --git a/computestorage/destroy.go b/computestorage/destroy.go index 5058d3b55e..2e1897d470 100644 --- a/computestorage/destroy.go +++ b/computestorage/destroy.go @@ -4,9 +4,10 @@ package computestorage import ( "context" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -22,7 +23,7 @@ func DestroyLayer(ctx context.Context, layerPath string) (err error) { err = hcsDestroyLayer(layerPath) if err != nil { - return errors.Wrap(err, "failed to destroy layer") + return fmt.Errorf("failed to destroy layer: %w", err) } return nil } diff --git a/computestorage/detach.go b/computestorage/detach.go index 6e00e4a1f8..be4c167753 100644 --- a/computestorage/detach.go +++ b/computestorage/detach.go @@ -5,10 +5,11 @@ package computestorage import ( "context" "encoding/json" + "fmt" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -24,7 +25,7 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) err = hcsDetachLayerStorageFilter(layerPath) if err != nil { - return errors.Wrap(err, "failed to detach layer storage filter") + return fmt.Errorf("failed to detach layer storage filter: %w", err) } return nil } @@ -48,7 +49,7 @@ func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcss err = hcsDetachOverlayFilter(volumePath, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to detach overlay filter") + return fmt.Errorf("failed to detach overlay filter: %w", err) } return nil } diff --git a/computestorage/export.go b/computestorage/export.go index c6370a5c9a..7e85d65280 100644 --- a/computestorage/export.go +++ b/computestorage/export.go @@ -5,9 +5,10 @@ package computestorage import ( "context" 
"encoding/json" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -42,7 +43,7 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes)) if err != nil { - return errors.Wrap(err, "failed to export layer") + return fmt.Errorf("failed to export layer: %w", err) } return nil } diff --git a/computestorage/format.go b/computestorage/format.go index 2140e5c9fc..5e8fe787f3 100644 --- a/computestorage/format.go +++ b/computestorage/format.go @@ -4,9 +4,10 @@ package computestorage import ( "context" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -26,7 +27,7 @@ func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err err = hcsFormatWritableLayerVhd(vhdHandle) if err != nil { - return errors.Wrap(err, "failed to format writable layer vhd") + return fmt.Errorf("failed to format writable layer vhd: %w", err) } return nil } diff --git a/computestorage/helpers.go b/computestorage/helpers.go index 858c84601c..f21810d37f 100644 --- a/computestorage/helpers.go +++ b/computestorage/helpers.go @@ -4,13 +4,14 @@ package computestorage import ( "context" + "fmt" "os" "path/filepath" "syscall" "github.com/Microsoft/go-winio/vhd" "github.com/Microsoft/hcsshim/internal/memory" - "github.com/pkg/errors" + "golang.org/x/sys/windows" "github.com/Microsoft/hcsshim/internal/security" @@ -42,23 +43,23 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh // differencing disks if they exist in case we're asking for a different size. if _, err := os.Stat(hivesPath); err == nil { if err := os.RemoveAll(hivesPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting hives directory") + return fmt.Errorf("failed to remove prexisting hives directory: %w", err) } } if _, err := os.Stat(layoutPath); err == nil { if err := os.RemoveAll(layoutPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting layout file") + return fmt.Errorf("failed to remove prexisting layout file: %w", err) } } if _, err := os.Stat(baseVhdPath); err == nil { if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx path") + return fmt.Errorf("failed to remove base vhdx path: %w", err) } } if _, err := os.Stat(diffVhdPath); err == nil { if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") + return fmt.Errorf("failed to remove differencing vhdx: %w", err) } } @@ -71,7 +72,7 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh } handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) if err != nil { - return errors.Wrap(err, "failed to create vhdx") + return fmt.Errorf("failed to create vhdx: %w", err) } defer func() { @@ -87,7 +88,7 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh } // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") + return fmt.Errorf("failed to close vhdx handle: %w", err) } options := OsLayerOptions{ @@ -102,14 +103,14 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh // Create the differencing disk that will 
be what's copied for the final rw layer // for a container. if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") + return fmt.Errorf("failed to create differencing disk: %w", err) } if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + return fmt.Errorf("failed to grant vm group access to %s: %w", baseVhdPath, err) } if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + return fmt.Errorf("failed to grant vm group access to %s: %w", diffVhdPath, err) } return nil } @@ -128,12 +129,12 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP // Remove the base and differencing disks if they exist in case we're asking for a different size. if _, err := os.Stat(baseVhdPath); err == nil { if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx") + return fmt.Errorf("failed to remove base vhdx: %w", err) } } if _, err := os.Stat(diffVhdPath); err == nil { if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") + return fmt.Errorf("failed to remove differencing vhdx: %w", err) } } @@ -147,7 +148,7 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP } handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) if err != nil { - return errors.Wrap(err, "failed to create vhdx") + return fmt.Errorf("failed to create vhdx: %w", err) } defer func() { @@ -164,7 +165,7 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP Version: 2, } if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { - return errors.Wrapf(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } options := OsLayerOptions{ @@ -177,23 +178,23 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP // Detach and close the handle after setting up the layer as we don't need the handle // for anything else and we no longer need to be attached either. if err = vhd.DetachVirtualDisk(handle); err != nil { - return errors.Wrap(err, "failed to detach vhdx") + return fmt.Errorf("failed to detach vhdx: %w", err) } if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") + return fmt.Errorf("failed to close vhdx handle: %w", err) } // Create the differencing disk that will be what's copied for the final rw layer // for a container. 
if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") + return fmt.Errorf("failed to create differencing disk: %w", err) } if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + return fmt.Errorf("failed to grant vm group access to %s: %w", baseVhdPath, err) } if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + return fmt.Errorf("failed to grant vm group access to %s: %w", diffVhdPath, err) } return nil } diff --git a/computestorage/import.go b/computestorage/import.go index e1c87416a3..bacc7d53c1 100644 --- a/computestorage/import.go +++ b/computestorage/import.go @@ -5,9 +5,10 @@ package computestorage import ( "context" "encoding/json" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -37,7 +38,7 @@ func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerD err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to import layer") + return fmt.Errorf("failed to import layer: %w", err) } return nil } diff --git a/computestorage/initialize.go b/computestorage/initialize.go index d0c6216056..0fbcb0eb71 100644 --- a/computestorage/initialize.go +++ b/computestorage/initialize.go @@ -5,9 +5,10 @@ package computestorage import ( "context" "encoding/json" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -34,7 +35,7 @@ func InitializeWritableLayer(ctx context.Context, layerPath string, layerData La // Options are not used in the platform as of RS5 err = hcsInitializeWritableLayer(layerPath, string(bytes), "") if err != nil { - return errors.Wrap(err, "failed to intitialize container layer") + return fmt.Errorf("failed to intitialize container layer: %w", err) } return nil } diff --git a/computestorage/mount.go b/computestorage/mount.go index 4f4d8ebf2f..755d2caccb 100644 --- a/computestorage/mount.go +++ b/computestorage/mount.go @@ -4,10 +4,11 @@ package computestorage import ( "context" + "fmt" "github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -21,7 +22,7 @@ func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path s var mountPath *uint16 err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) if err != nil { - return "", errors.Wrap(err, "failed to get vhd mount path") + return "", fmt.Errorf("failed to get vhd mount path: %w", err) } path = interop.ConvertAndFreeCoTaskMemString(mountPath) return path, nil diff --git a/computestorage/setup.go b/computestorage/setup.go index 1c685aed0a..c37c820bcd 100644 --- a/computestorage/setup.go +++ b/computestorage/setup.go @@ -5,10 +5,12 @@ package computestorage import ( "context" "encoding/json" + "errors" + "fmt" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" + "go.opencensus.io/trace" "golang.org/x/sys/windows" ) @@ -38,7 +40,7 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") + return fmt.Errorf("failed to setup base 
OS layer: %w", err) } return nil } @@ -74,7 +76,7 @@ func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, option err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") + return fmt.Errorf("failed to setup base OS layer: %w", err) } return nil } diff --git a/ext4/dmverity/dmverity.go b/ext4/dmverity/dmverity.go index 6e91c4bbc5..5dc13dc841 100644 --- a/ext4/dmverity/dmverity.go +++ b/ext4/dmverity/dmverity.go @@ -6,12 +6,11 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" + "errors" "fmt" "io" "os" - "github.com/pkg/errors" - "github.com/Microsoft/hcsshim/ext4/internal/compactext4" "github.com/Microsoft/hcsshim/internal/memory" ) @@ -93,7 +92,7 @@ func MerkleTree(r io.Reader) ([]byte, error) { if err == io.EOF { break } - return nil, errors.Wrap(err, "failed to read data block") + return nil, fmt.Errorf("failed to read data block: %w", err) } h := hash2(salt, block) nextLevel.Write(h) @@ -116,7 +115,7 @@ func MerkleTree(r io.Reader) ([]byte, error) { tree := bytes.NewBuffer(make([]byte, 0)) for i := len(layers) - 1; i >= 0; i-- { if _, err := tree.Write(layers[i]); err != nil { - return nil, errors.Wrap(err, "failed to write merkle tree") + return nil, fmt.Errorf("failed to write merkle tree: %w", err) } } @@ -173,9 +172,9 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error) // Skip the ext4 data to get to dm-verity super block if s, err := vhd.Seek(offsetInBytes, io.SeekStart); err != nil || s != offsetInBytes { if err != nil { - return nil, errors.Wrap(err, "failed to seek dm-verity super block") + return nil, fmt.Errorf("failed to seek dm-verity super block: %w", err) } - return nil, errors.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s) + return nil, fmt.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s) } return ReadDMVerityInfoReader(vhd) @@ -238,7 +237,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error { tree, err := MerkleTree(r) if err != nil { - return errors.Wrap(err, "failed to build merkle tree") + return fmt.Errorf("failed to build merkle tree: %w", err) } devSize, err := r.Seek(0, io.SeekEnd) @@ -253,7 +252,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error { dmVeritySB := NewDMVeritySuperblock(uint64(devSize)) if err := binary.Write(w, binary.LittleEndian, dmVeritySB); err != nil { - return errors.Wrap(err, "failed to write dm-verity super-block") + return fmt.Errorf("failed to write dm-verity super-block: %w", err) } // write super-block padding padding := bytes.Repeat([]byte{0}, blockSize-(sbSize%blockSize)) @@ -262,7 +261,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error { } // write tree if _, err := w.Write(tree); err != nil { - return errors.Wrap(err, "failed to write merkle tree") + return fmt.Errorf("failed to write merkle tree: %w", err) } return nil } diff --git a/ext4/tar2ext4/tar2ext4.go b/ext4/tar2ext4/tar2ext4.go index 5af6bc21bf..4fbac07fbc 100644 --- a/ext4/tar2ext4/tar2ext4.go +++ b/ext4/tar2ext4/tar2ext4.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bufio" "encoding/binary" + "errors" "fmt" "io" "os" @@ -14,7 +15,6 @@ import ( "github.com/Microsoft/hcsshim/ext4/internal/compactext4" "github.com/Microsoft/hcsshim/ext4/internal/format" "github.com/Microsoft/hcsshim/internal/log" - "github.com/pkg/errors" ) type params struct { @@ -109,7 +109,7 @@ func 
ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro } if err = fs.MakeParents(name); err != nil { - return errors.Wrapf(err, "failed to ensure parent directories for %s", name) + return fmt.Errorf("failed to ensure parent directories for %s: %w", name, err) } if p.convertWhiteout { @@ -119,12 +119,12 @@ func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro // Update the directory with the appropriate xattr. f, err := fs.Stat(dir) if err != nil { - return errors.Wrapf(err, "failed to stat parent directory of whiteout %s", file) + return fmt.Errorf("failed to stat parent directory of whiteout %s: %w", file, err) } f.Xattrs["trusted.overlay.opaque"] = []byte("y") err = fs.Create(dir, f) if err != nil { - return errors.Wrapf(err, "failed to create opaque dir %s", file) + return fmt.Errorf("failed to create opaque dir %s: %w", file, err) } } else { // Create an overlay-style whiteout. @@ -135,7 +135,7 @@ func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro } err = fs.Create(path.Join(dir, file[len(whiteoutPrefix):]), f) if err != nil { - return errors.Wrapf(err, "failed to create whiteout file for %s", file) + return fmt.Errorf("failed to create whiteout file for %s: %w", file, err) } } diff --git a/go.mod b/go.mod index 896a343e17..6e02a663e5 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,6 @@ require ( github.com/opencontainers/runc v1.1.14 github.com/opencontainers/runtime-spec v1.2.0 github.com/pelletier/go-toml v1.9.5 - github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/urfave/cli v1.22.15 github.com/vishvananda/netlink v1.3.0 @@ -91,6 +90,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect diff --git a/hcn/hcnsupport.go b/hcn/hcnsupport.go index c80b4ef901..75e4d72129 100644 --- a/hcn/hcnsupport.go +++ b/hcn/hcnsupport.go @@ -3,9 +3,9 @@ package hcn import ( + "fmt" "sync" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/log" @@ -89,7 +89,7 @@ func getSupportedFeatures() (SupportedFeatures, error) { globals, err := GetGlobals() if err != nil { // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. 
- return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") + return SupportedFeatures{}, fmt.Errorf("failed to query HCN version number: this is expected on pre 1803 builds: %w", err) } features.Acl = AclFeatures{ AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), diff --git a/internal/cmd/io_binary.go b/internal/cmd/io_binary.go index 3647b66aae..715ab29a9e 100644 --- a/internal/cmd/io_binary.go +++ b/internal/cmd/io_binary.go @@ -4,6 +4,7 @@ package cmd import ( "context" + "errors" "fmt" "io" "net" @@ -16,7 +17,6 @@ import ( "github.com/Microsoft/go-winio" "github.com/containerd/containerd/namespaces" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/log" @@ -101,7 +101,7 @@ func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ UpstreamIO, er select { case err = <-errCh: if err != nil { - return nil, errors.Wrap(err, "failed to start binary logger") + return nil, fmt.Errorf("failed to start binary logger: %w", err) } case <-time.After(binaryCmdStartTimeout): return nil, errors.New("failed to start binary logger: timeout") @@ -275,7 +275,7 @@ func openNPipe(path string) (io.ReadWriteCloser, error) { func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -283,7 +283,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/internal/cpugroup/cpugroup.go b/internal/cpugroup/cpugroup.go index 3abaa9c439..1560571872 100644 --- a/internal/cpugroup/cpugroup.go +++ b/internal/cpugroup/cpugroup.go @@ -5,12 +5,12 @@ package cpugroup import ( "context" "encoding/json" + "errors" "fmt" "strings" "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/pkg/errors" ) const NullGroupID = "00000000-0000-0000-0000-000000000000" @@ -50,7 +50,7 @@ func Create(ctx context.Context, id string, logicalProcessors []uint32) error { LogicalProcessorCount: uint32(len(logicalProcessors)), } if err := modifyCPUGroupRequest(ctx, operation, details); err != nil { - return errors.Wrapf(err, "failed to make cpugroups CreateGroup request for details %+v", details) + return fmt.Errorf("failed to make cpugroups CreateGroup request for details %+v: %w", details, err) } return nil } @@ -66,7 +66,7 @@ func GetCPUGroupConfig(ctx context.Context, id string) (*hcsschema.CpuGroupConfi } groupConfigs := &hcsschema.CpuGroupConfigurations{} if err := json.Unmarshal(cpuGroupsPresent.Properties[0], groupConfigs); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal host cpugroups") + return nil, fmt.Errorf("failed to unmarshal host cpugroups: %w", err) } for _, c := range groupConfigs.CpuGroups { diff --git a/internal/devices/assigned_devices.go b/internal/devices/assigned_devices.go index 50a840b46d..68e042a1a2 100644 --- a/internal/devices/assigned_devices.go +++ b/internal/devices/assigned_devices.go @@ -12,7 +12,6 @@ import ( "github.com/Microsoft/hcsshim/internal/cmd" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/pkg/errors" ) // AddDevice is the api exposed to oci/hcsoci to handle
assigning a device on a WCOW UVM @@ -43,7 +42,7 @@ func AddDevice(ctx context.Context, vm *uvm.UtilityVM, idType, deviceID string, if uvm.IsValidDeviceType(idType) { vpci, err = vm.AssignDevice(ctx, deviceID, index, "") if err != nil { - return vpci, nil, errors.Wrapf(err, "failed to assign device %s of type %s to pod %s", deviceID, idType, vm.ID()) + return vpci, nil, fmt.Errorf("failed to assign device %s of type %s to pod %s: %w", deviceID, idType, vm.ID(), err) } vmBusInstanceID := vm.GetAssignedDeviceVMBUSInstanceID(vpci.VMBusGUID) log.G(ctx).WithField("vmbus id", vmBusInstanceID).Info("vmbus instance ID") @@ -77,7 +76,7 @@ func getChildrenDeviceLocationPaths(ctx context.Context, vm *uvm.UtilityVM, vmBu } exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq) if err != nil { - return nil, errors.Wrapf(err, "failed to find devices with exit code %d", exitCode) + return nil, fmt.Errorf("failed to find devices with exit code %d: %w", exitCode, err) } // wait to finish parsing stdout results diff --git a/internal/devices/pnp.go b/internal/devices/pnp.go index 72d4fc141a..6ad86c1eb0 100644 --- a/internal/devices/pnp.go +++ b/internal/devices/pnp.go @@ -5,6 +5,7 @@ package devices import ( "context" + "errors" "fmt" "io" "net" @@ -15,7 +16,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -53,7 +54,7 @@ func execPnPInstallDriver(ctx context.Context, vm *uvm.UtilityVM, driverDir stri } exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq) if err != nil && exitCode != winapi.ERROR_NO_MORE_ITEMS { - return errors.Wrapf(err, "failed to install driver %s in uvm with exit code %d", driverDir, exitCode) + return fmt.Errorf("failed to install driver %s in uvm with exit code %d: %w", driverDir, exitCode, err) } else if exitCode == winapi.ERROR_NO_MORE_ITEMS { // As mentioned in `pnputilNoMoreItemsErrorMessage`, this exit code comes from pnputil // but is not necessarily an error @@ -76,7 +77,7 @@ func readCsPipeOutput(l net.Listener, errChan chan<- error, result *[]string) { defer close(errChan) c, err := l.Accept() if err != nil { - errChan <- errors.Wrapf(err, "failed to accept named pipe") + errChan <- fmt.Errorf("failed to accept named pipe: %w", err) return } bytes, err := io.ReadAll(c) @@ -105,7 +106,7 @@ func readAllPipeOutput(l net.Listener, errChan chan<- error, result *string) { defer close(errChan) c, err := l.Accept() if err != nil { - errChan <- errors.Wrapf(err, "failed to accept named pipe") + errChan <- fmt.Errorf("failed to accept named pipe: %w", err) return } bytes, err := io.ReadAll(c) diff --git a/internal/gcs/guestconnection.go b/internal/gcs/guestconnection.go index fe974b5c17..07aa9363be 100644 --- a/internal/gcs/guestconnection.go +++ b/internal/gcs/guestconnection.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "net" @@ -20,7 +21,7 @@ import ( "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) diff --git a/internal/guest/bridge/bridge.go b/internal/guest/bridge/bridge.go index f14663344f..52e978ca3f 100644 --- a/internal/guest/bridge/bridge.go +++ b/internal/guest/bridge/bridge.go @@ -13,12 +13,10 @@ import ( "io" "math" "os" - "strconv" "sync" "sync/atomic" "time" - "github.com/pkg/errors" 
"github.com/sirupsen/logrus" "go.opencensus.io/trace" "go.opencensus.io/trace/tracestate" @@ -33,7 +31,7 @@ import ( // UnknownMessage represents the default handler logic for an unmatched request // type sent from the bridge. func UnknownMessage(r *Request) (RequestResponse, error) { - return nil, gcserr.WrapHresult(errors.Errorf("bridge: function not supported, header type: %v", r.Header.Type), gcserr.HrNotImpl) + return nil, gcserr.WrapHresult(fmt.Errorf("bridge: function not supported, header type: %v", r.Header.Type), gcserr.HrNotImpl) } // UnknownMessageHandler creates a default HandlerFunc out of the @@ -249,7 +247,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser if err == io.ErrUnexpectedEOF || err == os.ErrClosed { //nolint:errorlint break } - recverr = errors.Wrap(err, "bridge: failed reading message header") + recverr = fmt.Errorf("bridge: failed reading message header: %w", err) break } message := make([]byte, header.Size-prot.MessageHeaderSize) @@ -257,7 +255,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser if err == io.ErrUnexpectedEOF || err == os.ErrClosed { //nolint:errorlint break } - recverr = errors.Wrap(err, "bridge: failed reading message payload") + recverr = fmt.Errorf("bridge: failed reading message payload: %w", err) break } @@ -373,17 +371,17 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser for resp := range b.responseChan { responseBytes, err := json.Marshal(resp.response) if err != nil { - resperr = errors.Wrapf(err, "bridge: failed to marshal JSON for response \"%v\"", resp.response) + resperr = fmt.Errorf("bridge: failed to marshal JSON for response \"%v\": %w", resp.response, err) break } resp.header.Size = uint32(len(responseBytes) + prot.MessageHeaderSize) if err := binary.Write(bridgeOut, binary.LittleEndian, resp.header); err != nil { - resperr = errors.Wrap(err, "bridge: failed writing message header") + resperr = fmt.Errorf("bridge: failed writing message header: %w", err) break } if _, err := bridgeOut.Write(responseBytes); err != nil { - resperr = errors.Wrap(err, "bridge: failed writing message payload") + resperr = fmt.Errorf("bridge: failed writing message payload: %w", err) break } @@ -415,7 +413,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser case <-time.After(time.Second * 5): // Timeout expired first. Close the connection to unblock the read if cerr := bridgeIn.Close(); cerr != nil { - err = errors.Wrap(cerr, "bridge: failed to close bridgeIn") + err = fmt.Errorf("bridge: failed to close bridgeIn: %w", cerr) } <-requestErrChan } @@ -455,21 +453,6 @@ func setErrorForResponseBase(response *prot.MessageResponseBase, errForResponse // (Still keep using -1 for backwards compatibility ...) 
lineNumber := uint32(math.MaxUint32) functionName := "" - if stack := gcserr.BaseStackTrace(errForResponse); stack != nil { - bottomFrame := stack[0] - stackString = fmt.Sprintf("%+v", stack) - fileName = fmt.Sprintf("%s", bottomFrame) - lineNumberStr := fmt.Sprintf("%d", bottomFrame) - if n, err := strconv.ParseUint(lineNumberStr, 10, 32); err == nil { - lineNumber = uint32(n) - } else { - logrus.WithFields(logrus.Fields{ - "line-number": lineNumberStr, - logrus.ErrorKey: err, - }).Error("opengcs::bridge::setErrorForResponseBase - failed to parse line number, using -1 instead") - } - functionName = fmt.Sprintf("%n", bottomFrame) - } hresult, err := gcserr.GetHresult(errForResponse) if err != nil { // Default to using the generic failure HRESULT. diff --git a/internal/guest/bridge/bridge_unit_test.go b/internal/guest/bridge/bridge_unit_test.go index 67f583da05..1af1068524 100644 --- a/internal/guest/bridge/bridge_unit_test.go +++ b/internal/guest/bridge/bridge_unit_test.go @@ -6,6 +6,8 @@ package bridge import ( "encoding/binary" "encoding/json" + "errors" + "fmt" "io" "os" "strings" @@ -15,7 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/gcserr" "github.com/Microsoft/hcsshim/internal/guest/prot" "github.com/Microsoft/hcsshim/internal/guest/transport" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -385,7 +387,7 @@ func serverSend(conn io.Writer, messageType prot.MessageIdentifier, messageID pr var err error body, err = json.Marshal(i) if err != nil { - return errors.Wrap(err, "failed to json marshal to server.") + return fmt.Errorf("failed to json marshal to server: %w", err) } } @@ -397,11 +399,11 @@ func serverSend(conn io.Writer, messageType prot.MessageIdentifier, messageID pr // Send the header. if err := binary.Write(conn, binary.LittleEndian, header); err != nil { - return errors.Wrap(err, "bridge_test: failed to write message header") + return fmt.Errorf("bridge_test: failed to write message header: %w", err) } // Send the body. if _, err := conn.Write(body); err != nil { - return errors.Wrap(err, "bridge_test: failed to write the message body") + return fmt.Errorf("bridge_test: failed to write the message body: %w", err) } return nil } @@ -410,12 +412,12 @@ func serverRead(conn io.Reader) (*prot.MessageHeader, []byte, error) { header := &prot.MessageHeader{} // Read the header. if err := binary.Read(conn, binary.LittleEndian, header); err != nil { - return nil, nil, errors.Wrap(err, "bridge_test: failed to read message header") + return nil, nil, fmt.Errorf("bridge_test: failed to read message header: %w", err) } message := make([]byte, header.Size-prot.MessageHeaderSize) // Read the body.
if _, err := io.ReadFull(conn, message); err != nil { - return nil, nil, errors.Wrap(err, "bridge_test: failed to read the message body") + return nil, nil, fmt.Errorf("bridge_test: failed to read the message body: %w", err) } return header, message, nil diff --git a/internal/guest/bridge/bridge_v2.go b/internal/guest/bridge/bridge_v2.go index f9712abc9d..aa930b8b2d 100644 --- a/internal/guest/bridge/bridge_v2.go +++ b/internal/guest/bridge/bridge_v2.go @@ -6,10 +6,11 @@ package bridge import ( "context" "encoding/json" + "errors" + "fmt" "syscall" "time" - "github.com/pkg/errors" "go.opencensus.io/trace" "golang.org/x/sys/unix" @@ -53,7 +54,7 @@ func (b *Bridge) negotiateProtocolV2(r *Request) (_ RequestResponse, err error) var request prot.NegotiateProtocol if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } if request.MaximumVersion < uint32(prot.PvV4) || uint32(prot.PvMax) < request.MinimumVersion { @@ -89,18 +90,16 @@ func (b *Bridge) createContainerV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerCreate if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } var settingsV2 prot.VMHostedContainerSettingsV2 if err := commonutils.UnmarshalJSONWithHresult([]byte(request.ContainerConfig), &settingsV2); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON for ContainerConfig \"%s\"", request.ContainerConfig) + return nil, fmt.Errorf("failed to unmarshal JSON for ContainerConfig %q: %w", request.ContainerConfig, err) } if settingsV2.SchemaVersion.Cmp(prot.SchemaVersion{Major: 2, Minor: 1}) < 0 { - return nil, gcserr.WrapHresult( - errors.Errorf("invalid schema version: %v", settingsV2.SchemaVersion), - gcserr.HrVmcomputeInvalidJSON) + return nil, gcserr.WrapHresult(fmt.Errorf("invalid schema version: %v", settingsV2.SchemaVersion), gcserr.HrVmcomputeInvalidJSON) } c, err := b.hostState.CreateContainer(ctx, request.ContainerID, &settingsV2) @@ -144,7 +143,7 @@ func (b *Bridge) startContainerV2(r *Request) (_ RequestResponse, err error) { // returned to the HCS. var request prot.MessageBase if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } return &prot.MessageResponseBase{}, nil @@ -173,14 +172,14 @@ func (b *Bridge) execProcessV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerExecuteProcess if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } // The request contains a JSON string field which is equivalent to an // ExecuteProcessInfo struct. 
var params prot.ProcessParameters if err := commonutils.UnmarshalJSONWithHresult([]byte(request.Settings.ProcessParameters), &params); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON for ProcessParameters \"%s\"", request.Settings.ProcessParameters) + return nil, fmt.Errorf("failed to unmarshal JSON for ProcessParameters %q: %w", request.Settings.ProcessParameters, err) } var conSettings stdio.ConnectionSettings @@ -195,7 +194,6 @@ func (b *Bridge) execProcessV2(r *Request) (_ RequestResponse, err error) { } pid, err := b.hostState.ExecProcess(ctx, request.ContainerID, params, conSettings) - if err != nil { return nil, err } @@ -243,7 +241,7 @@ func (b *Bridge) signalContainerShutdownV2(ctx context.Context, span *trace.Span var request prot.MessageBase if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } // If this is targeting the UVM send the request to the host itself. @@ -270,7 +268,7 @@ func (b *Bridge) signalProcessV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerSignalProcess if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } span.AddAttributes( @@ -299,14 +297,14 @@ func (b *Bridge) getPropertiesV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerGetProperties if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } var query prot.PropertyQuery if len(request.Query) != 0 { if err := json.Unmarshal([]byte(request.Query), &query); err != nil { e := gcserr.WrapHresult(err, gcserr.HrVmcomputeInvalidJSON) - return nil, errors.Wrapf(e, "The query could not be unmarshaled: '%s'", query) + return nil, fmt.Errorf("the query %q could not be unmarshaled: %w", query, e) } } @@ -324,7 +322,7 @@ func (b *Bridge) getPropertiesV2(r *Request) (_ RequestResponse, err error) { var err error propertyJSON, err = json.Marshal(properties) if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%+v\"", properties) + return nil, fmt.Errorf("failed to marshal properties \"%+v\" to JSON: %w", properties, err) } } @@ -341,7 +339,7 @@ func (b *Bridge) waitOnProcessV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerWaitForProcess if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } span.AddAttributes( @@ -396,7 +394,7 @@ func (b *Bridge) resizeConsoleV2(r *Request) (_ RequestResponse, err error) { var request prot.ContainerResizeConsole if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } span.AddAttributes( @@ -430,7 +428,7 @@ func (b *Bridge) modifySettingsV2(r *Request) (_
RequestResponse, err error) { request, err := prot.UnmarshalContainerModifySettings(r.Message) if err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } err = b.hostState.ModifySettings(ctx, request.ContainerID, request.Request.(*guestrequest.ModificationRequest)) @@ -464,7 +462,7 @@ func (b *Bridge) deleteContainerStateV2(r *Request) (_ RequestResponse, err erro var request prot.MessageBase if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message) + return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err) } c, err := b.hostState.GetCreatedContainer(request.ContainerID) diff --git a/internal/guest/gcserr/errors.go b/internal/guest/gcserr/errors.go index 8e793ab806..f15ee21a86 100644 --- a/internal/guest/gcserr/errors.go +++ b/internal/guest/gcserr/errors.go @@ -1,10 +1,8 @@ package gcserr import ( + "errors" "fmt" - "io" - - "github.com/pkg/errors" ) // Hresult is a type corresponding to the HRESULT error type used on Windows. @@ -56,39 +54,6 @@ const ( // TODO: update implementation to use go1.13 style errors with `errors.As` and co. -// StackTracer is an interface originating (but not exported) from the -// github.com/pkg/errors package. It defines something which can return a stack -// trace. -type StackTracer interface { - StackTrace() errors.StackTrace -} - -// BaseStackTrace gets the earliest errors.StackTrace in the given error's cause -// stack. This will be the stack trace which reaches closest to the error's -// actual origin. It returns nil if no stack trace is found in the cause stack. -func BaseStackTrace(e error) errors.StackTrace { - type causer interface { - Cause() error - } - cause := e - var tracer StackTracer - for cause != nil { - serr, ok := cause.(StackTracer) //nolint:errorlint - if ok { - tracer = serr - } - cerr, ok := cause.(causer) //nolint:errorlint - if !ok { - break - } - cause = cerr.Cause() - } - if tracer == nil { - return nil - } - return tracer.StackTrace() -} - type baseHresultError struct { hresult Hresult } @@ -96,6 +61,7 @@ type baseHresultError struct { func (e *baseHresultError) Error() string { return fmt.Sprintf("HRESULT: 0x%x", uint32(e.Hresult())) } + func (e *baseHresultError) Hresult() Hresult { return e.hresult } @@ -106,38 +72,16 @@ type wrappingHresultError struct { } func (e *wrappingHresultError) Error() string { - return fmt.Sprintf("HRESULT 0x%x", uint32(e.Hresult())) + ": " + e.Cause().Error() + return fmt.Sprintf("HRESULT 0x%x", uint32(e.Hresult())) + ": " + e.Unwrap().Error() } + func (e *wrappingHresultError) Hresult() Hresult { return e.hresult } -func (e *wrappingHresultError) Cause() error { + +func (e *wrappingHresultError) Unwrap() error { return e.cause } -func (e *wrappingHresultError) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", e.Cause()) - return - } - fallthrough - case 's': - _, _ = io.WriteString(s, e.Error()) - case 'q': - fmt.Fprintf(s, "%q", e.Error()) - } -} -func (e *wrappingHresultError) StackTrace() errors.StackTrace { - type stackTracer interface { - StackTrace() errors.StackTrace - } - serr, ok := e.Cause().(stackTracer) //nolint:errorlint - if !ok { - return nil - } - return serr.StackTrace() -} // NewHresultError produces a new error with the given HRESULT. 
func NewHresultError(hresult Hresult) error { @@ -146,6 +90,8 @@ func NewHresultError(hresult Hresult) error { // WrapHresult produces a new error with the given HRESULT and wrapping the // given error. +// +// Deprecated: use [fmt.Errorf] with %w and [NewHresultError] instead. func WrapHresult(e error, hresult Hresult) error { return &wrappingHresultError{ cause: e, @@ -153,29 +99,14 @@ func WrapHresult(e error, hresult Hresult) error { } } -// GetHresult iterates through the error's cause stack (similar to how the -// Cause function in github.com/pkg/errors operates). At the first error it -// encounters which implements the Hresult() method, it return's that error's -// HRESULT. This allows errors higher up in the cause stack to shadow the -// HRESULTs of errors lower down. +// GetHresult returns the topmost HRESULT of an error, if possible, or an error. func GetHresult(e error) (Hresult, error) { type hresulter interface { Hresult() Hresult } - type causer interface { - Cause() error - } - cause := e - for cause != nil { - herr, ok := cause.(hresulter) //nolint:errorlint - if ok { - return herr.Hresult(), nil - } - cerr, ok := cause.(causer) //nolint:errorlint - if !ok { - break - } - cause = cerr.Cause() + var herr hresulter + if errors.As(e, &herr) { + return herr.Hresult(), nil } - return -1, errors.Errorf("no HRESULT found in cause stack for error %s", e) + return -1, fmt.Errorf("no HRESULT found in stack for error %s", e) } diff --git a/internal/guest/network/netns.go b/internal/guest/network/netns.go index e414e5e320..b93a6d64a9 100644 --- a/internal/guest/network/netns.go +++ b/internal/guest/network/netns.go @@ -5,6 +5,7 @@ package network import ( "context" + "errors" "fmt" "net" "os/exec" @@ -14,7 +15,7 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/prot" "github.com/Microsoft/hcsshim/internal/log" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" @@ -26,15 +27,15 @@ func MoveInterfaceToNS(ifStr string, pid int) error { // Get a reference to the interface and make sure it's down link, err := netlink.LinkByName(ifStr) if err != nil { - return errors.Wrapf(err, "netlink.LinkByName(%s) failed", ifStr) + return fmt.Errorf("netlink.LinkByName(%s) failed: %w", ifStr, err) } if err := netlink.LinkSetDown(link); err != nil { - return errors.Wrapf(err, "netlink.LinkSetDown(%#v) failed", link) + return fmt.Errorf("netlink.LinkSetDown(%#v) failed: %w", link, err) } // Move the interface to the new network namespace if err := netlink.LinkSetNsPid(link, pid); err != nil { - return errors.Wrapf(err, "netlink.SetNsPid(%#v, %d) failed", link, pid) + return fmt.Errorf("netlink.SetNsPid(%#v, %d) failed: %w", link, pid, err) } return nil } @@ -49,12 +50,12 @@ func DoInNetNS(ns netns.NsHandle, run func() error) error { origNs, err := netns.Get() if err != nil { - return errors.Wrap(err, "failed to get current network namespace") + return fmt.Errorf("failed to get current network namespace: %w", err) } defer origNs.Close() if err := netns.Set(ns); err != nil { - return errors.Wrapf(err, "failed to set network namespace to %v", ns) + return fmt.Errorf("failed to set network namespace to %v: %w", ns, err) } // Defer so we can re-enter the threads original netns on exit. 
defer netns.Set(origNs) //nolint:errcheck @@ -79,7 +80,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net entry.Trace("Obtaining current namespace") ns, err := netns.Get() if err != nil { - return errors.Wrap(err, "netns.Get() failed") + return fmt.Errorf("netns.Get() failed: %w", err) } defer ns.Close() entry.WithField("namespace", ns).Debug("New network namespace from PID") @@ -88,7 +89,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net entry.Trace("Getting reference to interface") link, err := netlink.LinkByName(ifStr) if err != nil { - return errors.Wrapf(err, "netlink.LinkByName(%s) failed", ifStr) + return fmt.Errorf("netlink.LinkByName(%s) failed: %w", ifStr, err) } // User requested non-default MTU size @@ -96,7 +97,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net mtu := link.Attrs().MTU - int(adapter.EncapOverhead) entry.WithField("mtu", mtu).Debug("EncapOverhead non-zero, will set MTU") if err = netlink.LinkSetMTU(link, mtu); err != nil { - return errors.Wrapf(err, "netlink.LinkSetMTU(%#v, %d) failed", link, mtu) + return fmt.Errorf("netlink.LinkSetMTU(%#v, %d) failed: %w", link, mtu, err) } } @@ -112,7 +113,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net // Bring the interface up if err := netlink.LinkSetUp(link); err != nil { - return errors.Wrapf(err, "netlink.LinkSetUp(%#v) failed", link) + return fmt.Errorf("netlink.LinkSetUp(%#v) failed: %w", link, err) } if err := assignIPToLink(ctx, ifStr, nsPid, link, adapter.AllocatedIPAddress, adapter.HostIPAddress, adapter.HostIPPrefixLength, @@ -156,7 +157,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net } if err != nil { entry.WithError(err).Debugf("udhcpc failed [%s]", cos) - return errors.Wrapf(err, "process failed (%s)", cos) + return fmt.Errorf("process failed (%s): %w", cos, err) } } var cos string @@ -210,7 +211,7 @@ func assignIPToLink(ctx context.Context, // Set IP address ip, addr, err := net.ParseCIDR(allocatedIP + "/" + strconv.FormatUint(uint64(prefixLen), 10)) if err != nil { - return errors.Wrapf(err, "parsing address %s/%d failed", allocatedIP, prefixLen) + return fmt.Errorf("parsing address %s/%d failed: %w", allocatedIP, prefixLen, err) } // the IP address field in addr is masked, so replace it with the original ip address addr.IP = ip @@ -220,7 +221,7 @@ func assignIPToLink(ctx context.Context, }).Debugf("parsed ip address %s/%d", allocatedIP, prefixLen) ipAddr := &netlink.Addr{IPNet: addr, Label: ""} if err := netlink.AddrAdd(link, ipAddr); err != nil { - return errors.Wrapf(err, "netlink.AddrAdd(%#v, %#v) failed", link, ipAddr) + return fmt.Errorf("netlink.AddrAdd(%#v, %#v) failed: %w", link, ipAddr, err) } if gatewayIP == "" { return nil @@ -228,7 +229,7 @@ func assignIPToLink(ctx context.Context, // Set gateway gw := net.ParseIP(gatewayIP) if gw == nil { - return errors.Wrapf(err, "parsing gateway address %s failed", gatewayIP) + return fmt.Errorf("parsing gateway address %s failed", gatewayIP) } if !addr.Contains(gw) { @@ -243,7 +244,7 @@ func assignIPToLink(ctx context.Context, Mask: net.CIDRMask(ml, ml)} ipAddr2 := &netlink.Addr{IPNet: addr2, Label: ""} if err := netlink.AddrAdd(link, ipAddr2); err != nil { - return errors.Wrapf(err, "netlink.AddrAdd(%#v, %#v) failed", link, ipAddr2) + return fmt.Errorf("netlink.AddrAdd(%#v, %#v) failed: %w", link, ipAddr2, err) } } @@ -262,7 +263,7 @@ func assignIPToLink(ctx
context.Context, rule.Priority = 5 if err := netlink.RuleAdd(rule); err != nil { - return errors.Wrapf(err, "netlink.RuleAdd(%#v) failed", rule) + return fmt.Errorf("netlink.RuleAdd(%#v) failed: %w", rule, err) } table = rule.Table } @@ -275,7 +276,7 @@ func assignIPToLink(ctx context.Context, Priority: metric, } if err := netlink.RouteAdd(&route); err != nil { - return errors.Wrapf(err, "netlink.RouteAdd(%#v) failed", route) + return fmt.Errorf("netlink.RouteAdd(%#v) failed: %w", route, err) } return nil } diff --git a/internal/guest/network/network.go b/internal/guest/network/network.go index 68f7c1bef1..e7a106ab58 100644 --- a/internal/guest/network/network.go +++ b/internal/guest/network/network.go @@ -17,7 +17,7 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/storage/vmbus" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "go.opencensus.io/trace" ) @@ -69,7 +69,7 @@ func GenerateResolvConfContent(ctx context.Context, searches, servers, options [ trace.StringAttribute("options", strings.Join(options, ", "))) if len(searches) > maxDNSSearches { - return "", errors.Errorf("searches has more than %d domains", maxDNSSearches) + return "", fmt.Errorf("searches has more than %d domains", maxDNSSearches) } content := "" @@ -136,7 +136,7 @@ func InstanceIDToName(ctx context.Context, id string, vpciAssigned bool) (_ stri netDevicePath, err = vmbusWaitForDevicePath(ctx, vmBusNetSubPath) } if err != nil { - return "", errors.Wrapf(err, "failed to find adapter %v sysfs path", vmBusID) + return "", fmt.Errorf("failed to find adapter %v sysfs path: %w", vmBusID, err) } var deviceDirs []os.DirEntry @@ -146,22 +146,22 @@ func InstanceIDToName(ctx context.Context, id string, vpciAssigned bool) (_ stri if os.IsNotExist(err) { select { case <-ctx.Done(): - return "", errors.Wrap(ctx.Err(), "timed out waiting for net adapter") + return "", fmt.Errorf("timed out waiting for net adapter: %w", ctx.Err()) default: time.Sleep(10 * time.Millisecond) continue } } else { - return "", errors.Wrapf(err, "failed to read vmbus network device from /sys filesystem for adapter %s", vmBusID) + return "", fmt.Errorf("failed to read vmbus network device from /sys filesystem for adapter %s: %w", vmBusID, err) } } break } if len(deviceDirs) == 0 { - return "", errors.Errorf("no interface name found for adapter %s", vmBusID) + return "", fmt.Errorf("no interface name found for adapter %s", vmBusID) } if len(deviceDirs) > 1 { - return "", errors.Errorf("multiple interface names found for adapter %s", vmBusID) + return "", fmt.Errorf("multiple interface names found for adapter %s", vmBusID) } ifname := deviceDirs[0].Name() log.G(ctx).WithField("ifname", ifname).Debug("resolved ifname") diff --git a/internal/guest/prot/protocol.go b/internal/guest/prot/protocol.go index 891891d510..0ca113e092 100644 --- a/internal/guest/prot/protocol.go +++ b/internal/guest/prot/protocol.go @@ -5,11 +5,11 @@ package prot import ( "encoding/json" + "fmt" "strconv" v1 "github.com/containerd/cgroups/v3/cgroup1/stats" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/internal/guest/commonutils" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" @@ -518,14 +518,14 @@ func UnmarshalContainerModifySettings(b []byte) (*ContainerModifySettings, error var requestRawSettings json.RawMessage request.Request = &requestRawSettings if err := commonutils.UnmarshalJSONWithHresult(b, &request); err != nil { - return 
nil, errors.Wrap(err, "failed to unmarshal ContainerModifySettings") + return nil, fmt.Errorf("failed to unmarshal ContainerModifySettings: %w", err) } var msr guestrequest.ModificationRequest var msrRawSettings json.RawMessage msr.Settings = &msrRawSettings if err := commonutils.UnmarshalJSONWithHresult(requestRawSettings, &msr); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal request.Settings as ModifySettingRequest") + return &request, fmt.Errorf("failed to unmarshal request.Settings as ModifySettingRequest: %w", err) } if msr.RequestType == "" { @@ -537,65 +537,65 @@ func UnmarshalContainerModifySettings(b []byte) (*ContainerModifySettings, error case guestresource.ResourceTypeSCSIDevice: msd := &guestresource.SCSIDevice{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, msd); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as SCSIDevice") + return &request, fmt.Errorf("failed to unmarshal settings as SCSIDevice: %w", err) } msr.Settings = msd case guestresource.ResourceTypeMappedVirtualDisk: mvd := &guestresource.LCOWMappedVirtualDisk{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, mvd); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as MappedVirtualDiskV2") + return &request, fmt.Errorf("failed to unmarshal settings as MappedVirtualDiskV2: %w", err) } msr.Settings = mvd case guestresource.ResourceTypeMappedDirectory: md := &guestresource.LCOWMappedDirectory{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, md); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as MappedDirectoryV2") + return &request, fmt.Errorf("failed to unmarshal settings as MappedDirectoryV2: %w", err) } msr.Settings = md case guestresource.ResourceTypeVPMemDevice: vpd := &guestresource.LCOWMappedVPMemDevice{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, vpd); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal hosted settings as MappedVPMemDeviceV2") + return &request, fmt.Errorf("failed to unmarshal hosted settings as MappedVPMemDeviceV2: %w", err) } msr.Settings = vpd case guestresource.ResourceTypeCombinedLayers: cl := &guestresource.LCOWCombinedLayers{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, cl); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as CombinedLayersV2") + return &request, fmt.Errorf("failed to unmarshal settings as CombinedLayersV2: %w", err) } msr.Settings = cl case guestresource.ResourceTypeNetwork: na := &guestresource.LCOWNetworkAdapter{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, na); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as NetworkAdapterV2") + return &request, fmt.Errorf("failed to unmarshal settings as NetworkAdapterV2: %w", err) } msr.Settings = na case guestresource.ResourceTypeVPCIDevice: vd := &guestresource.LCOWMappedVPCIDevice{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, vd); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as MappedVPCIDeviceV2") + return &request, fmt.Errorf("failed to unmarshal settings as MappedVPCIDeviceV2: %w", err) } msr.Settings = vd case guestresource.ResourceTypeContainerConstraints: cc := &guestresource.LCOWContainerConstraints{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, cc); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as 
ContainerConstraintsV2") + return &request, fmt.Errorf("failed to unmarshal settings as ContainerConstraintsV2: %w", err) } msr.Settings = cc case guestresource.ResourceTypeSecurityPolicy: enforcer := &guestresource.LCOWConfidentialOptions{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, enforcer); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as LCOWConfidentialOptions") + return &request, fmt.Errorf("failed to unmarshal settings as LCOWConfidentialOptions: %w", err) } msr.Settings = enforcer case guestresource.ResourceTypePolicyFragment: fragment := &guestresource.LCOWSecurityPolicyFragment{} if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, fragment); err != nil { - return &request, errors.Wrap(err, "failed to unmarshal settings as LCOWSecurityPolicyFragment") + return &request, fmt.Errorf("failed to unmarshal settings as LCOWSecurityPolicyFragment: %w", err) } msr.Settings = fragment default: - return &request, errors.Errorf("invalid ResourceType '%s'", msr.ResourceType) + return &request, fmt.Errorf("invalid ResourceType %q", msr.ResourceType) } request.Request = &msr return &request, nil diff --git a/internal/guest/runtime/hcsv2/container.go b/internal/guest/runtime/hcsv2/container.go index 61af3f1a09..2de0c9668c 100644 --- a/internal/guest/runtime/hcsv2/container.go +++ b/internal/guest/runtime/hcsv2/container.go @@ -14,7 +14,7 @@ import ( cgroups "github.com/containerd/cgroups/v3/cgroup1" v1 "github.com/containerd/cgroups/v3/cgroup1/stats" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -267,7 +267,7 @@ func (c *Container) GetStats(ctx context.Context) (*v1.Metrics, error) { cgroupPath := c.spec.Linux.CgroupsPath cg, err := cgroups.Load(cgroups.StaticPath(cgroupPath)) if err != nil { - return nil, errors.Errorf("failed to get container stats for %v: %v", c.id, err) + return nil, fmt.Errorf("failed to get container stats for %v: %v", c.id, err) } return cg.Stat(cgroups.IgnoreNotExist) diff --git a/internal/guest/runtime/hcsv2/network.go b/internal/guest/runtime/hcsv2/network.go index e8b0ebec17..0ba95c1677 100644 --- a/internal/guest/runtime/hcsv2/network.go +++ b/internal/guest/runtime/hcsv2/network.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/pkg/errors" "github.com/vishvananda/netns" "go.opencensus.io/trace" @@ -46,7 +45,7 @@ func getNetworkNamespace(id string) (*namespace, error) { ns, ok := namespaces[id] if !ok { - return nil, gcserr.WrapHresult(errors.Errorf("namespace '%s' not found", id), gcserr.HrErrNotFound) + return nil, gcserr.WrapHresult(fmt.Errorf("namespace %q not found", id), gcserr.HrErrNotFound) } return ns, nil } @@ -86,7 +85,7 @@ func RemoveNetworkNamespace(ctx context.Context, id string) (err error) { ns.m.Lock() defer ns.m.Unlock() if len(ns.nics) > 0 { - return errors.Errorf("network namespace '%s' contains adapters", id) + return fmt.Errorf("network namespace %q contains adapters", id) } delete(namespaces, id) } @@ -123,7 +122,7 @@ func (n *namespace) AssignContainerPid(ctx context.Context, pid int) (err error) defer n.m.Unlock() if n.pid != 0 { - return errors.Errorf("previously assigned container pid %d to network namespace %q", n.pid, n.id) + return fmt.Errorf("previously assigned container pid %d to network namespace %q", n.pid, n.id) } n.pid = pid @@ -159,7 +158,7 @@ func (n *namespace) AddAdapter(ctx context.Context, adp *guestresource.LCOWNetwo for _, nic := range n.nics { if 
strings.EqualFold(nic.adapter.ID, adp.ID) { - return errors.Errorf("adapter with id: '%s' already present in namespace", adp.ID) + return fmt.Errorf("adapter with id: %q already present in namespace", adp.ID) } } @@ -265,13 +264,13 @@ func (nin *nicInNamespace) assignToPid(ctx context.Context, pid int) (err error) } if err := network.MoveInterfaceToNS(nin.ifname, pid); err != nil { - return errors.Wrapf(err, "failed to move interface %s to network namespace", nin.ifname) + return fmt.Errorf("failed to move interface %s to network namespace: %w", nin.ifname, err) } // Get a reference to the new network namespace ns, err := netns.GetFromPid(pid) if err != nil { - return errors.Wrapf(err, "netns.GetFromPid(%d) failed", pid) + return fmt.Errorf("netns.GetFromPid(%d) failed: %w", pid, err) } defer ns.Close() @@ -280,7 +279,7 @@ func (nin *nicInNamespace) assignToPid(ctx context.Context, pid int) (err error) } if err := network.DoInNetNS(ns, netNSCfg); err != nil { - return errors.Wrapf(err, "failed to configure adapter aid: %s, if id: %s", nin.adapter.ID, nin.ifname) + return fmt.Errorf("failed to configure adapter aid: %s, if id: %s: %w", nin.adapter.ID, nin.ifname, err) } nin.assignedPid = pid return nil diff --git a/internal/guest/runtime/hcsv2/nvidia_utils.go b/internal/guest/runtime/hcsv2/nvidia_utils.go index 59d9f50654..2379be789c 100644 --- a/internal/guest/runtime/hcsv2/nvidia_utils.go +++ b/internal/guest/runtime/hcsv2/nvidia_utils.go @@ -12,7 +12,6 @@ import ( "strings" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/cmd/gcstools/generichook" "github.com/Microsoft/hcsshim/internal/guest/storage/pci" @@ -29,7 +28,7 @@ func addNvidiaDeviceHook(ctx context.Context, spec *oci.Spec, ociBundlePath stri genericHookBinary := "generichook" genericHookPath, err := exec.LookPath(genericHookBinary) if err != nil { - return errors.Wrapf(err, "failed to find %s for container device support", genericHookBinary) + return fmt.Errorf("failed to find %s for container device support: %w", genericHookBinary, err) } toolDebugPath := filepath.Join(ociBundlePath, nvidiaDebugFilePath) @@ -54,7 +53,7 @@ func addNvidiaDeviceHook(ctx context.Context, spec *oci.Spec, ociBundlePath stri case "gpu": busLocation, err := pci.FindDeviceBusLocationFromVMBusGUID(ctx, d.ID) if err != nil { - return errors.Wrapf(err, "failed to find nvidia gpu bus location") + return fmt.Errorf("failed to find nvidia gpu bus location: %w", err) } args = append(args, fmt.Sprintf("--device=%s", busLocation)) } diff --git a/internal/guest/runtime/hcsv2/process.go b/internal/guest/runtime/hcsv2/process.go index e29e6e62f7..8f7b209a34 100644 --- a/internal/guest/runtime/hcsv2/process.go +++ b/internal/guest/runtime/hcsv2/process.go @@ -5,6 +5,7 @@ package hcsv2 import ( "context" + "errors" "fmt" "os/exec" "sync" @@ -17,7 +18,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" ) @@ -233,7 +234,7 @@ func newExternalProcess(ctx context.Context, cmd *exec.Cmd, tty *stdio.TtyRelay, remove: onRemove, } if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to call Start for external process") + return nil, fmt.Errorf("failed to call Start for external process: %w", err) } if tty != nil { tty.Start() diff --git a/internal/guest/runtime/hcsv2/sandbox_container.go 
b/internal/guest/runtime/hcsv2/sandbox_container.go index 864d7221c5..a2d55784f0 100644 --- a/internal/guest/runtime/hcsv2/sandbox_container.go +++ b/internal/guest/runtime/hcsv2/sandbox_container.go @@ -5,12 +5,13 @@ package hcsv2 import ( "context" + "fmt" "os" "path/filepath" "strings" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "go.opencensus.io/trace" "github.com/Microsoft/hcsshim/internal/guest/network" @@ -40,7 +41,7 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) ( // Generate the sandbox root dir rootDir := specInternal.SandboxRootDir(id) if err := os.MkdirAll(rootDir, 0755); err != nil { - return errors.Wrapf(err, "failed to create sandbox root directory %q", rootDir) + return fmt.Errorf("failed to create sandbox root directory %q: %w", rootDir, err) } defer func() { if err != nil { @@ -54,20 +55,20 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) ( var err error hostname, err = os.Hostname() if err != nil { - return errors.Wrap(err, "failed to get hostname") + return fmt.Errorf("failed to get hostname: %w", err) } } sandboxHostnamePath := getSandboxHostnamePath(id) if err := os.WriteFile(sandboxHostnamePath, []byte(hostname+"\n"), 0644); err != nil { - return errors.Wrapf(err, "failed to write hostname to %q", sandboxHostnamePath) + return fmt.Errorf("failed to write hostname to %q: %w", sandboxHostnamePath, err) } // Write the hosts sandboxHostsContent := network.GenerateEtcHostsContent(ctx, hostname) sandboxHostsPath := getSandboxHostsPath(id) if err := os.WriteFile(sandboxHostsPath, []byte(sandboxHostsContent), 0644); err != nil { - return errors.Wrapf(err, "failed to write sandbox hosts to %q", sandboxHostsPath) + return fmt.Errorf("failed to write sandbox hosts to %q: %w", sandboxHostsPath, err) } // Write resolv.conf @@ -86,11 +87,11 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) ( } resolvContent, err := network.GenerateResolvConfContent(ctx, searches, servers, nil) if err != nil { - return errors.Wrap(err, "failed to generate sandbox resolv.conf content") + return fmt.Errorf("failed to generate sandbox resolv.conf content: %w", err) } sandboxResolvPath := getSandboxResolvPath(id) if err := os.WriteFile(sandboxResolvPath, []byte(resolvContent), 0644); err != nil { - return errors.Wrap(err, "failed to write sandbox resolv.conf") + return fmt.Errorf("failed to write sandbox resolv.conf: %w", err) } // User.Username is generally only used on Windows, but as there's no (easy/fast at least) way to grab diff --git a/internal/guest/runtime/hcsv2/spec.go b/internal/guest/runtime/hcsv2/spec.go index 70d1ecacaa..e135c0c71e 100644 --- a/internal/guest/runtime/hcsv2/spec.go +++ b/internal/guest/runtime/hcsv2/spec.go @@ -5,6 +5,7 @@ package hcsv2 import ( "context" + "errors" "fmt" "math" "path/filepath" @@ -15,7 +16,6 @@ import ( "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runc/libcontainer/user" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/pkg/annotations" @@ -71,11 +71,11 @@ func setCoreRLimit(spec *oci.Spec, value string) error { soft, err := strconv.ParseUint(vals[0], 10, 64) if err != nil { - return errors.Wrap(err, "failed to parse soft core rlimit") + return fmt.Errorf("failed to parse soft core rlimit: %w", err) } hard, err := strconv.ParseUint(vals[1], 10, 64) if err != nil { - return 
errors.Wrap(err, "failed to parse hard core rlimit") + return fmt.Errorf("failed to parse hard core rlimit: %w", err) } spec.Process.Rlimits = append(spec.Process.Rlimits, oci.POSIXRlimit{ @@ -117,7 +117,7 @@ func setUserStr(spec *oci.Spec, userstr string) error { return setUsername(spec, userstr) } if outOfUint32Bounds(v) { - return errors.Errorf("UID (%d) exceeds uint32 bounds", v) + return fmt.Errorf("UID (%d) exceeds uint32 bounds", v) } return setUserID(spec, uint32(v)) case 2: @@ -131,7 +131,7 @@ func setUserStr(spec *oci.Spec, userstr string) error { username = parts[0] } else { if outOfUint32Bounds(v) { - return errors.Errorf("UID (%d) exceeds uint32 bounds", v) + return fmt.Errorf("UID (%d) exceeds uint32 bounds", v) } uid = uint32(v) } @@ -141,7 +141,7 @@ func setUserStr(spec *oci.Spec, userstr string) error { groupname = parts[1] } else { if outOfUint32Bounds(v) { - return errors.Errorf("GID (%d) for user %q exceeds uint32 bounds", v, parts[0]) + return fmt.Errorf("GID (%d) for user %q exceeds uint32 bounds", v, parts[0]) } gid = uint32(v) } @@ -151,11 +151,11 @@ func setUserStr(spec *oci.Spec, userstr string) error { return u.Name == username }) if err != nil { - return errors.Wrapf(err, "failed to find user by username: %s", username) + return fmt.Errorf("failed to find user by username: %s: %w", username, err) } if outOfUint32Bounds(u.Uid) { - return errors.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username) + return fmt.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username) } uid = uint32(u.Uid) } @@ -164,11 +164,11 @@ func setUserStr(spec *oci.Spec, userstr string) error { return g.Name == groupname }) if err != nil { - return errors.Wrapf(err, "failed to find group by groupname: %s", groupname) + return fmt.Errorf("failed to find group by groupname: %s: %w", groupname, err) } if outOfUint32Bounds(g.Gid) { - return errors.Errorf("GID (%d) for groupname %q exceeds uint32 bounds", g.Gid, groupname) + return fmt.Errorf("GID (%d) for groupname %q exceeds uint32 bounds", g.Gid, groupname) } gid = uint32(g.Gid) } @@ -185,13 +185,13 @@ func setUsername(spec *oci.Spec, username string) error { return u.Name == username }) if err != nil { - return errors.Wrapf(err, "failed to find user by username: %s", username) + return fmt.Errorf("failed to find user by username: %s: %w", username, err) } if outOfUint32Bounds(u.Uid) { - return errors.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username) + return fmt.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username) } if outOfUint32Bounds(u.Gid) { - return errors.Errorf("GID (%d) for username %q exceeds uint32 bounds", u.Gid, username) + return fmt.Errorf("GID (%d) for username %q exceeds uint32 bounds", u.Gid, username) } spec.Process.User.UID, spec.Process.User.GID = uint32(u.Uid), uint32(u.Gid) return nil @@ -207,7 +207,7 @@ func setUserID(spec *oci.Spec, uid uint32) error { } if outOfUint32Bounds(u.Gid) { - return errors.Errorf("GID (%d) for UID %d exceeds uint32 bounds", u.Gid, uid) + return fmt.Errorf("GID (%d) for UID %d exceeds uint32 bounds", u.Gid, uid) } spec.Process.User.UID, spec.Process.User.GID = uid, uint32(u.Gid) return nil @@ -219,7 +219,7 @@ func getUser(spec *oci.Spec, filter func(user.User) bool) (user.User, error) { return user.User{}, err } if len(users) != 1 { - return user.User{}, errors.Errorf("expected exactly 1 user matched '%d'", len(users)) + return user.User{}, fmt.Errorf("expected exactly 1 user matched '%d'", len(users)) } return 
users[0], nil } @@ -230,7 +230,7 @@ func getGroup(spec *oci.Spec, filter func(user.Group) bool) (user.Group, error) return user.Group{}, err } if len(groups) != 1 { - return user.Group{}, errors.Errorf("expected exactly 1 group matched '%d'", len(groups)) + return user.Group{}, fmt.Errorf("expected exactly 1 group matched '%d'", len(groups)) } return groups[0], nil } diff --git a/internal/guest/runtime/hcsv2/spec_devices.go b/internal/guest/runtime/hcsv2/spec_devices.go index f4c403c503..59465a8f7f 100644 --- a/internal/guest/runtime/hcsv2/spec_devices.go +++ b/internal/guest/runtime/hcsv2/spec_devices.go @@ -5,6 +5,7 @@ package hcsv2 import ( "context" + "errors" "fmt" "path/filepath" "strings" @@ -14,7 +15,6 @@ import ( "github.com/Microsoft/hcsshim/internal/log" "github.com/opencontainers/runc/libcontainer/devices" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) const ( @@ -42,12 +42,12 @@ func addAssignedDevice(ctx context.Context, spec *oci.Spec) error { // validate that the device is available fullPCIPath, err := pci.FindDeviceFullPath(ctx, d.ID) if err != nil { - return errors.Wrapf(err, "failed to find device pci path for device %v", d) + return fmt.Errorf("failed to find device pci path for device %v: %w", d, err) } // find the device nodes that link to the pci path we just got devs, err := devicePathsFromPCIPath(ctx, fullPCIPath) if err != nil { - return errors.Wrapf(err, "failed to find dev node for device %v", d) + return fmt.Errorf("failed to find dev node for device %v: %w", d, err) } for _, dev := range devs { addLinuxDeviceToSpec(ctx, dev, spec, true) diff --git a/internal/guest/runtime/hcsv2/standalone_container.go b/internal/guest/runtime/hcsv2/standalone_container.go index f4f07b262e..c8b5b83118 100644 --- a/internal/guest/runtime/hcsv2/standalone_container.go +++ b/internal/guest/runtime/hcsv2/standalone_container.go @@ -5,12 +5,13 @@ package hcsv2 import ( "context" + "fmt" "os" "path/filepath" "strings" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "go.opencensus.io/trace" "github.com/Microsoft/hcsshim/internal/guest/network" @@ -44,7 +45,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec // Generate the standalone root dir rootDir := getStandaloneRootDir(id) if err := os.MkdirAll(rootDir, 0755); err != nil { - return errors.Wrapf(err, "failed to create container root directory %q", rootDir) + return fmt.Errorf("failed to create container root directory %q: %w", rootDir, err) } defer func() { if err != nil { @@ -57,7 +58,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec var err error hostname, err = os.Hostname() if err != nil { - return errors.Wrap(err, "failed to get hostname") + return fmt.Errorf("failed to get hostname: %w", err) } } @@ -65,7 +66,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec if !specInternal.MountPresent("/etc/hostname", spec.Mounts) { standaloneHostnamePath := getStandaloneHostnamePath(id) if err := os.WriteFile(standaloneHostnamePath, []byte(hostname+"\n"), 0644); err != nil { - return errors.Wrapf(err, "failed to write hostname to %q", standaloneHostnamePath) + return fmt.Errorf("failed to write hostname to %q: %w", standaloneHostnamePath, err) } mt := oci.Mount{ @@ -85,7 +86,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec standaloneHostsContent := network.GenerateEtcHostsContent(ctx, hostname) standaloneHostsPath := 
getStandaloneHostsPath(id) if err := os.WriteFile(standaloneHostsPath, []byte(standaloneHostsContent), 0644); err != nil { - return errors.Wrapf(err, "failed to write standalone hosts to %q", standaloneHostsPath) + return fmt.Errorf("failed to write standalone hosts to %q: %w", standaloneHostsPath, err) } mt := oci.Mount{ @@ -114,11 +115,11 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec } resolvContent, err := network.GenerateResolvConfContent(ctx, searches, servers, nil) if err != nil { - return errors.Wrap(err, "failed to generate standalone resolv.conf content") + return fmt.Errorf("failed to generate standalone resolv.conf content: %w", err) } standaloneResolvPath := getStandaloneResolvPath(id) if err := os.WriteFile(standaloneResolvPath, []byte(resolvContent), 0644); err != nil { - return errors.Wrap(err, "failed to write standalone resolv.conf") + return fmt.Errorf("failed to write standalone resolv.conf: %w", err) } mt := oci.Mount{ diff --git a/internal/guest/runtime/hcsv2/uvm.go b/internal/guest/runtime/hcsv2/uvm.go index 171ff3f448..d2a4db79fa 100644 --- a/internal/guest/runtime/hcsv2/uvm.go +++ b/internal/guest/runtime/hcsv2/uvm.go @@ -9,6 +9,7 @@ import ( "crypto/sha256" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "os" @@ -45,7 +46,7 @@ import ( "github.com/Microsoft/hcsshim/pkg/securitypolicy" "github.com/mattn/go-shellwords" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -178,7 +179,7 @@ func (h *Host) InjectFragment(ctx context.Context, fragment *guestresource.LCOWS sha.Write(blob) timestamp := time.Now() fragmentPath := fmt.Sprintf("fragment-%x-%d.blob", sha.Sum(nil), timestamp.UnixMilli()) - _ = os.WriteFile(filepath.Join("/tmp", fragmentPath), blob, 0644) + _ = os.WriteFile(filepath.Join("/tmp", fragmentPath), blob, 0o644) unpacked, err := cosesign1.UnpackAndValidateCOSE1CertChain(raw) if err != nil { @@ -277,8 +278,8 @@ func (h *Host) AddContainer(id string, c *Container) error { func setupSandboxMountsPath(id string) (err error) { mountPath := spec.SandboxMountsDir(id) - if err := os.MkdirAll(mountPath, 0755); err != nil { - return errors.Wrapf(err, "failed to create sandboxMounts dir in sandbox %v", id) + if err := os.MkdirAll(mountPath, 0o755); err != nil { + return fmt.Errorf("failed to create sandboxMounts dir in sandbox %v: %w", id, err) } defer func() { if err != nil { @@ -291,8 +292,8 @@ func setupSandboxMountsPath(id string) (err error) { func setupSandboxHugePageMountsPath(id string) error { mountPath := spec.HugePagesMountsDir(id) - if err := os.MkdirAll(mountPath, 0755); err != nil { - return errors.Wrapf(err, "failed to create hugepage Mounts dir in sandbox %v", id) + if err := os.MkdirAll(mountPath, 0o755); err != nil { + return fmt.Errorf("failed to create hugepage Mounts dir in sandbox %v: %w", id, err) } return storage.MountRShared(mountPath) @@ -361,7 +362,7 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM sid, ok := settings.OCISpecification.Annotations[annotations.KubernetesSandboxID] sandboxID = sid if !ok || sid == "" { - return nil, errors.Errorf("unsupported 'io.kubernetes.cri.sandbox-id': '%s'", sid) + return nil, fmt.Errorf("unsupported %q: %q", annotations.KubernetesSandboxID, sid) } if err := setupWorkloadContainerSpec(ctx, sid, id, settings.OCISpecification, settings.OCIBundlePath); err != nil { return nil, err @@ -385,7 +386,7 @@ func (h *Host) CreateContainer(ctx 
context.Context, id string, settings *prot.VM return nil, err } default: - return nil, errors.Errorf("unsupported 'io.kubernetes.cri.container-type': '%s'", criType) + return nil, fmt.Errorf("unsupported %q: %q", annotations.KubernetesContainerType, criType) } } else { // Capture namespaceID if any because setupStandaloneContainerSpec clears the Windows section. @@ -431,7 +432,7 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM seccomp, ) if err != nil { - return nil, errors.Wrapf(err, "container creation denied due to policy") + return nil, fmt.Errorf("container creation denied due to policy: %w", err) } if !allowStdio { @@ -465,23 +466,23 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM return nil, fmt.Errorf("failed to create security context directory: %w", err) } // Make sure that files inside directory are readable - if err := os.Chmod(securityContextDir, 0755); err != nil { + if err := os.Chmod(securityContextDir, 0o755); err != nil { return nil, fmt.Errorf("failed to chmod security context directory: %w", err) } if len(encodedPolicy) > 0 { - if err := writeFileInDir(securityContextDir, securitypolicy.PolicyFilename, []byte(encodedPolicy), 0744); err != nil { + if err := writeFileInDir(securityContextDir, securitypolicy.PolicyFilename, []byte(encodedPolicy), 0o744); err != nil { return nil, fmt.Errorf("failed to write security policy: %w", err) } } if len(h.uvmReferenceInfo) > 0 { - if err := writeFileInDir(securityContextDir, securitypolicy.ReferenceInfoFilename, []byte(h.uvmReferenceInfo), 0744); err != nil { + if err := writeFileInDir(securityContextDir, securitypolicy.ReferenceInfoFilename, []byte(h.uvmReferenceInfo), 0o744); err != nil { return nil, fmt.Errorf("failed to write UVM reference info: %w", err) } } if len(hostAMDCert) > 0 { - if err := writeFileInDir(securityContextDir, securitypolicy.HostAMDCertFilename, []byte(hostAMDCert), 0744); err != nil { + if err := writeFileInDir(securityContextDir, securitypolicy.HostAMDCertFilename, []byte(hostAMDCert), 0o744); err != nil { return nil, fmt.Errorf("failed to write host AMD certificate: %w", err) } } @@ -493,30 +494,30 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM } // Create the BundlePath - if err := os.MkdirAll(settings.OCIBundlePath, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to create OCIBundlePath: '%s'", settings.OCIBundlePath) + if err := os.MkdirAll(settings.OCIBundlePath, 0o700); err != nil { + return nil, fmt.Errorf("failed to create OCIBundlePath: %w", err) } configFile := path.Join(settings.OCIBundlePath, "config.json") f, err := os.Create(configFile) if err != nil { - return nil, errors.Wrapf(err, "failed to create config.json at: '%s'", configFile) + return nil, fmt.Errorf("failed to create config.json: %w", err) } defer f.Close() writer := bufio.NewWriter(f) if err := json.NewEncoder(writer).Encode(settings.OCISpecification); err != nil { - return nil, errors.Wrapf(err, "failed to write OCISpecification to config.json at: '%s'", configFile) + return nil, fmt.Errorf("failed to write OCISpecification to config.json at %q: %w", configFile, err) } if err := writer.Flush(); err != nil { - return nil, errors.Wrapf(err, "failed to flush writer for config.json at: '%s'", configFile) + return nil, fmt.Errorf("failed to flush writer for config.json at %q: %w", configFile, err) } con, err := h.rtime.CreateContainer(id, settings.OCIBundlePath, nil) if err != nil { - return nil, errors.Wrapf(err, 
"failed to create container") + return nil, fmt.Errorf("failed to create container: %w", err) } init, err := con.GetInitProcess() if err != nil { - return nil, errors.Wrapf(err, "failed to get container init process") + return nil, fmt.Errorf("failed to get container init process: %w", err) } c.container = con @@ -619,7 +620,7 @@ func (h *Host) modifyHostSettings(ctx context.Context, containerID string, req * } return h.InjectFragment(ctx, r) default: - return errors.Errorf("the ResourceType %q is not supported for UVM", req.ResourceType) + return fmt.Errorf("the ResourceType %q is not supported for UVM", req.ResourceType) } } @@ -633,7 +634,7 @@ func (h *Host) modifyContainerSettings(ctx context.Context, containerID string, case guestresource.ResourceTypeContainerConstraints: return c.modifyContainerConstraints(ctx, req.RequestType, req.Settings.(*guestresource.LCOWContainerConstraints)) default: - return errors.Errorf("the ResourceType \"%s\" is not supported for containers", req.ResourceType) + return fmt.Errorf("the ResourceType %q is not supported for containers", req.ResourceType) } } @@ -716,7 +717,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot. params.WorkingDirectory, ) if err != nil { - return pid, errors.Wrapf(err, "exec is denied due to policy") + return pid, fmt.Errorf("exec is denied due to policy: %w", err) } // It makes no sense to allow access if stdio access is denied and the @@ -729,7 +730,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot. params.Environment = processOCIEnvToParam(envToKeep) } - var tport = h.vsock + tport := h.vsock if !allowStdioAccess { tport = h.devNullTransport } @@ -769,7 +770,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot. 
params.OCIProcess.Capabilities, ) if err != nil { - return pid, errors.Wrapf(err, "exec in container denied due to policy") + return pid, fmt.Errorf("exec in container denied due to policy: %w", err) } // It makes no sense to allow access if stdio access is denied and the @@ -807,7 +808,7 @@ func (h *Host) GetExternalProcess(pid int) (Process, error) { func (h *Host) GetProperties(ctx context.Context, containerID string, query prot.PropertyQuery) (*prot.PropertiesV2, error) { err := h.securityPolicyEnforcer.EnforceGetPropertiesPolicy(ctx) if err != nil { - return nil, errors.Wrapf(err, "get properties denied due to policy") + return nil, fmt.Errorf("get properties denied due to policy: %w", err) } c, err := h.GetCreatedContainer(containerID) @@ -825,7 +826,7 @@ func (h *Host) GetProperties(ctx context.Context, containerID string, query prot properties.ProcessList = make([]prot.ProcessDetails, len(pids)) for i, pid := range pids { if outOfUint32Bounds(pid) { - return nil, errors.Errorf("PID (%d) exceeds uint32 bounds", pid) + return nil, fmt.Errorf("PID (%d) exceeds uint32 bounds", pid) } properties.ProcessList[i].ProcessID = uint32(pid) } @@ -844,7 +845,7 @@ func (h *Host) GetProperties(ctx context.Context, containerID string, query prot func (h *Host) GetStacks(ctx context.Context) (string, error) { err := h.securityPolicyEnforcer.EnforceDumpStacksPolicy(ctx) if err != nil { - return "", errors.Wrapf(err, "dump stacks denied due to policy") + return "", fmt.Errorf("dump stacks denied due to policy: %w", err) } return debug.DumpStacks(), nil @@ -889,7 +890,7 @@ func (h *Host) runExternalProcess( ) master, consolePath, err = stdio.NewConsole() if err != nil { - return -1, errors.Wrap(err, "failed to create console for external process") + return -1, fmt.Errorf("failed to create console for external process: %w", err) } defer func() { if err != nil { @@ -898,9 +899,9 @@ func (h *Host) runExternalProcess( }() var console *os.File - console, err = os.OpenFile(consolePath, os.O_RDWR|syscall.O_NOCTTY, 0777) + console, err = os.OpenFile(consolePath, os.O_RDWR|syscall.O_NOCTTY, 0o777) if err != nil { - return -1, errors.Wrap(err, "failed to open console file for external process") + return -1, fmt.Errorf("failed to open console file for external process: %w", err) } defer console.Close() @@ -919,7 +920,7 @@ func (h *Host) runExternalProcess( var fileSet *stdio.FileSet fileSet, err = stdioSet.Files() if err != nil { - return -1, errors.Wrap(err, "failed to set cmd stdio") + return -1, fmt.Errorf("failed to set cmd stdio: %w", err) } defer fileSet.Close() defer stdioSet.Close() @@ -945,7 +946,7 @@ func (h *Host) runExternalProcess( } func newInvalidRequestTypeError(rt guestrequest.RequestType) error { - return errors.Errorf("the RequestType %q is not supported", rt) + return fmt.Errorf("the RequestType %q is not supported", rt) } func modifySCSIDevice( @@ -1000,7 +1001,7 @@ func modifyMappedVirtualDisk( } err = securityPolicy.EnforceDeviceMountPolicy(ctx, mvd.MountPath, deviceHash) if err != nil { - return errors.Wrapf(err, "mounting scsi device controller %d lun %d onto %s denied by policy", mvd.Controller, mvd.Lun, mvd.MountPath) + return fmt.Errorf("mounting scsi device controller %d lun %d onto %s denied by policy: %w", mvd.Controller, mvd.Lun, mvd.MountPath, err) } } config := &scsi.Config{ @@ -1050,14 +1051,14 @@ func modifyMappedDirectory( case guestrequest.RequestTypeAdd: err = securityPolicy.EnforcePlan9MountPolicy(ctx, md.MountPath) if err != nil { - return errors.Wrapf(err, "mounting 
plan9 device at %s denied by policy", md.MountPath) + return fmt.Errorf("mounting plan9 device at %s denied by policy: %w", md.MountPath, err) } return plan9.Mount(ctx, vsock, md.MountPath, md.ShareName, uint32(md.Port), md.ReadOnly) case guestrequest.RequestTypeRemove: err = securityPolicy.EnforcePlan9UnmountPolicy(ctx, md.MountPath) if err != nil { - return errors.Wrapf(err, "unmounting plan9 device at %s denied by policy", md.MountPath) + return fmt.Errorf("unmounting plan9 device at %s denied by policy: %w", md.MountPath, err) } return storage.UnmountPath(ctx, md.MountPath, true) @@ -1087,13 +1088,13 @@ func modifyMappedVPMemDevice(ctx context.Context, case guestrequest.RequestTypeAdd: err = securityPolicy.EnforceDeviceMountPolicy(ctx, vpd.MountPath, deviceHash) if err != nil { - return errors.Wrapf(err, "mounting pmem device %d onto %s denied by policy", vpd.DeviceNumber, vpd.MountPath) + return fmt.Errorf("mounting pmem device %d onto %s denied by policy: %w", vpd.DeviceNumber, vpd.MountPath, err) } return pmem.Mount(ctx, vpd.DeviceNumber, vpd.MountPath, vpd.MappingInfo, verityInfo) case guestrequest.RequestTypeRemove: if err := securityPolicy.EnforceDeviceUnmountPolicy(ctx, vpd.MountPath); err != nil { - return errors.Wrapf(err, "unmounting pmem device from %s denied by policy", vpd.MountPath) + return fmt.Errorf("unmounting pmem device from %s denied by policy: %w", vpd.MountPath, err) } return pmem.Unmount(ctx, vpd.DeviceNumber, vpd.MountPath, vpd.MappingInfo, verityInfo) @@ -1147,7 +1148,7 @@ func modifyCombinedLayers( return overlay.MountLayer(ctx, layerPaths, upperdirPath, workdirPath, cl.ContainerRootPath, readonly) case guestrequest.RequestTypeRemove: if err := securityPolicy.EnforceOverlayUnmountPolicy(ctx, cl.ContainerRootPath); err != nil { - return errors.Wrap(err, "overlay removal denied by policy") + return fmt.Errorf("overlay removal denied by policy: %w", err) } return storage.UnmountPath(ctx, cl.ContainerRootPath, true) @@ -1183,7 +1184,7 @@ func modifyNetwork(ctx context.Context, rt guestrequest.RequestType, na *guestre func processParamCommandLineToOCIArgs(commandLine string) ([]string, error) { args, err := shellwords.Parse(commandLine) if err != nil { - return nil, errors.Wrapf(err, "failed to parse command line string \"%s\"", commandLine) + return nil, fmt.Errorf("failed to parse command line string %q: %w", commandLine, err) } return args, nil } diff --git a/internal/guest/runtime/hcsv2/workload_container.go b/internal/guest/runtime/hcsv2/workload_container.go index 6ea39101e8..121930aea2 100644 --- a/internal/guest/runtime/hcsv2/workload_container.go +++ b/internal/guest/runtime/hcsv2/workload_container.go @@ -12,7 +12,7 @@ import ( "github.com/opencontainers/runc/libcontainer/devices" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "go.opencensus.io/trace" "golang.org/x/sys/unix" @@ -40,7 +40,7 @@ func updateSandboxMounts(sbid string, spec *oci.Spec) error { // filepath.Join cleans the resulting path before returning, so it would resolve the relative path if one was given. 
// Hence, we need to ensure that the resolved path is still under the correct directory if !strings.HasPrefix(sandboxSource, specInternal.SandboxMountsDir(sbid)) { - return errors.Errorf("mount path %v for mount %v is not within sandbox's mounts dir", sandboxSource, m.Source) + return fmt.Errorf("mount path %v for mount %v is not within sandbox's mounts dir", sandboxSource, m.Source) } spec.Mounts[i].Source = sandboxSource @@ -67,7 +67,7 @@ func updateHugePageMounts(sbid string, spec *oci.Spec) error { // filepath.Join cleans the resulting path before returning so it would resolve the relative path if one was given. // Hence, we need to ensure that the resolved path is still under the correct directory if !strings.HasPrefix(hugePageMountSource, mountsDir) { - return errors.Errorf("mount path %v for mount %v is not within hugepages's mounts dir", hugePageMountSource, m.Source) + return fmt.Errorf("mount path %v for mount %v is not within hugepages's mounts dir", hugePageMountSource, m.Source) } spec.Mounts[i].Source = hugePageMountSource @@ -78,7 +78,7 @@ func updateHugePageMounts(sbid string, spec *oci.Spec) error { return err } if err := unix.Mount("none", hugePageMountSource, "hugetlbfs", 0, "pagesize="+pageSize); err != nil { - return errors.Errorf("mount operation failed for %v failed with error %v", hugePageMountSource, err) + return fmt.Errorf("mount operation failed for %v: %w", hugePageMountSource, err) } } } @@ -144,16 +144,16 @@ func setupWorkloadContainerSpec(ctx context.Context, sbid, id string, spec *oci. // Verify no hostname if spec.Hostname != "" { - return errors.Errorf("workload container must not change hostname: %s", spec.Hostname) + return fmt.Errorf("workload container must not change hostname: %s", spec.Hostname) } // update any sandbox mounts with the sandboxMounts directory path and create files if err = updateSandboxMounts(sbid, spec); err != nil { - return errors.Wrapf(err, "failed to update sandbox mounts for container %v in sandbox %v", id, sbid) + return fmt.Errorf("failed to update sandbox mounts for container %v in sandbox %v: %w", id, sbid, err) } if err = updateHugePageMounts(sbid, spec); err != nil { - return errors.Wrapf(err, "failed to update hugepages mounts for container %v in sandbox %v", id, sbid) + return fmt.Errorf("failed to update hugepages mounts for container %v in sandbox %v: %w", id, sbid, err) } if err = updateBlockDeviceMounts(spec); err != nil { @@ -201,7 +201,7 @@ func setupWorkloadContainerSpec(ctx context.Context, sbid, id string, spec *oci.
} // add other assigned devices to the spec if err := addAssignedDevice(ctx, spec); err != nil { - return errors.Wrap(err, "failed to add assigned device(s) to the container spec") + return fmt.Errorf("failed to add assigned device(s) to the container spec: %w", err) } } diff --git a/internal/guest/runtime/runc/container.go b/internal/guest/runtime/runc/container.go index e1582be60f..deccbf2471 100644 --- a/internal/guest/runtime/runc/container.go +++ b/internal/guest/runtime/runc/container.go @@ -5,6 +5,8 @@ package runc import ( "encoding/json" + "errors" + "fmt" "net" "os" "path/filepath" @@ -13,7 +15,7 @@ import ( "syscall" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -59,7 +61,7 @@ func (c *container) Start() error { if err != nil { runcErr := getRuncLogError(logPath) c.r.cleanupContainer(c.id) //nolint:errcheck - return errors.Wrapf(runcErr, "runc start failed with %v: %s", err, string(out)) + return fmt.Errorf("runc start failed with %v: %s: %w", err, string(out), runcErr) } return nil } @@ -86,7 +88,7 @@ func (c *container) Kill(signal syscall.Signal) error { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return errors.Wrapf(runcErr, "unknown runc error after kill %v: %s", err, string(out)) + return fmt.Errorf("unknown runc error after kill %v: %s: %w", err, string(out), runcErr) } return nil } @@ -99,7 +101,7 @@ func (c *container) Delete() error { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return errors.Wrapf(runcErr, "runc delete failed with %v: %s", err, string(out)) + return fmt.Errorf("runc delete failed with %v: %s: %w", err, string(out), runcErr) } return c.r.cleanupContainer(c.id) } @@ -110,7 +112,7 @@ func (c *container) Pause() error { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return errors.Wrapf(runcErr, "runc pause failed with %v: %s", err, string(out)) + return fmt.Errorf("runc pause failed with %v: %s: %w", err, string(out), runcErr) } return nil } @@ -123,7 +125,7 @@ func (c *container) Resume() error { out, err := cmd.CombinedOutput() if err != nil { runcErr := getRuncLogError(logPath) - return errors.Wrapf(runcErr, "runc resume failed with %v: %s", err, string(out)) + return fmt.Errorf("runc resume failed with %v: %s: %w", err, string(out), runcErr) } return nil } @@ -134,11 +136,11 @@ func (c *container) GetState() (*runtime.ContainerState, error) { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return nil, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) + return nil, fmt.Errorf("runc state failed with %v: %s: %w", err, string(out), runcErr) } var state runtime.ContainerState if err := json.Unmarshal(out, &state); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal the state for container %s", c.id) + return nil, fmt.Errorf("failed to unmarshal the state for container %s: %w", c.id, err) } return &state, nil } @@ -156,7 +158,7 @@ func (c *container) Exists() (bool, error) { if errors.Is(runcErr, runtime.ErrContainerDoesNotExist) { return false, nil } - return false, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out)) + return false, fmt.Errorf("runc state failed with %v: %s: %w", err, string(out), runcErr) } return true, nil } @@ -189,13 +191,13 @@ func (c *container) GetRunningProcesses() ([]runtime.ContainerProcessState, erro // that 
the process was created by the Runtime. processDirs, err := os.ReadDir(filepath.Join(containerFilesDir, c.id)) if err != nil { - return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) + return nil, fmt.Errorf("failed to read the contents of container directory %s: %w", filepath.Join(containerFilesDir, c.id), err) } for _, processDir := range processDirs { if processDir.Name() != initPidFilename { pid, err := strconv.Atoi(processDir.Name()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse string \"%s\" as pid", processDir.Name()) + return nil, fmt.Errorf("failed to parse pid: %w", err) } if _, ok := pidMap[pid]; ok { pidMap[pid].CreatedByRuntime = true @@ -236,7 +238,7 @@ func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) { processDirs, err := os.ReadDir(filepath.Join(containerFilesDir, c.id)) if err != nil { - return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id)) + return nil, fmt.Errorf("failed to read the contents of container directory: %w", err) } // Loop over every process state directory. Since these processes have // process state directories, CreatedByRuntime will be true for all of them. @@ -244,7 +246,7 @@ func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) { if processDir.Name() != initPidFilename { pid, err := strconv.Atoi(processDir.Name()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse string \"%s\" into pid", processDir.Name()) + return nil, fmt.Errorf("failed to parse pid: %w", err) } if c.r.processExists(pid) { // If the process exists in /proc and is in the pidMap, it must @@ -317,11 +319,11 @@ func (c *container) runExecCommand(processDef *oci.Process, stdioSet *stdio.Conn f, err := os.Create(filepath.Join(tempProcessDir, "process.json")) if err != nil { - return nil, errors.Wrapf(err, "failed to create process.json file at %s", filepath.Join(tempProcessDir, "process.json")) + return nil, fmt.Errorf("failed to create process.json file at %s: %w", filepath.Join(tempProcessDir, "process.json"), err) } defer f.Close() if err := json.NewEncoder(f).Encode(processDef); err != nil { - return nil, errors.Wrap(err, "failed to encode JSON into process.json file") + return nil, fmt.Errorf("failed to encode JSON into process.json file: %w", err) } args := []string{"exec"} @@ -342,7 +344,7 @@ func (c *container) startProcess( args := initialArgs if err := setSubreaper(1); err != nil { - return nil, errors.Wrapf(err, "failed to set process as subreaper for process in container %s", c.id) + return nil, fmt.Errorf("failed to set process as subreaper for process in container %s: %w", c.id, err) } if err := c.r.makeLogDir(c.id); err != nil { return nil, err @@ -356,7 +358,7 @@ func (c *container) startProcess( var consoleSockPath string sockListener, consoleSockPath, err = c.r.createConsoleSocket(tempProcessDir) if err != nil { - return nil, errors.Wrapf(err, "failed to create console socket for container %s", c.id) + return nil, fmt.Errorf("failed to create console socket for container %s: %w", c.id, err) } defer sockListener.Close() args = append(args, "--console-socket", consoleSockPath) @@ -369,11 +371,11 @@ func (c *container) startProcess( if !hasTerminal { pipeRelay, err = stdio.NewPipeRelay(stdioSet) if err != nil { - return nil, errors.Wrapf(err, "failed to create a pipe relay connection set for container %s", c.id) + return nil, fmt.Errorf("failed 
to create a pipe relay connection set for container %s: %w", c.id, err) } fileSet, err := pipeRelay.Files() if err != nil { - return nil, errors.Wrapf(err, "failed to get files for connection set for container %s", c.id) + return nil, fmt.Errorf("failed to get files for connection set for container %s: %w", c.id, err) } // Closing the FileSet here is fine as that end of the pipes will have // already been copied into the child process. @@ -391,7 +393,7 @@ func (c *container) startProcess( if err := cmd.Run(); err != nil { runcErr := getRuncLogError(logPath) - return nil, errors.Wrapf(runcErr, "failed to run runc create/exec call for container %s with %v", c.id, err) + return nil, fmt.Errorf("failed to run runc create/exec call for container %s with %v: %w", c.id, err, runcErr) } var ttyRelay *stdio.TtyRelay @@ -400,7 +402,7 @@ func (c *container) startProcess( master, err = c.r.getMasterFromSocket(sockListener) if err != nil { _ = cmd.Process.Kill() - return nil, errors.Wrapf(err, "failed to get pty master for process in container %s", c.id) + return nil, fmt.Errorf("failed to get pty master for process in container %s: %w", c.id, err) } // Keep master open for the relay unless there is an error. defer func() { @@ -439,7 +441,7 @@ func (c *container) Update(resources interface{}) error { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return errors.Wrapf(runcErr, "runc update request %s failed with %v: %s", string(jsonResources), err, string(out)) + return fmt.Errorf("runc update request %s failed with %v: %s: %w", string(jsonResources), err, string(out), runcErr) } return nil } diff --git a/internal/guest/runtime/runc/ioutils.go b/internal/guest/runtime/runc/ioutils.go index a9f08a3df2..0c7e2dd305 100644 --- a/internal/guest/runtime/runc/ioutils.go +++ b/internal/guest/runtime/runc/ioutils.go @@ -4,11 +4,12 @@ package runc import ( + "errors" + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -19,11 +20,11 @@ func (*runcRuntime) createConsoleSocket(processDir string) (listener *net.UnixLi socketPath = filepath.Join(processDir, "master.sock") addr, err := net.ResolveUnixAddr("unix", socketPath) if err != nil { - return nil, "", errors.Wrapf(err, "failed to resolve unix socket at address %s", socketPath) + return nil, "", fmt.Errorf("failed to resolve unix socket at address %s: %w", socketPath, err) } listener, err = net.ListenUnix("unix", addr) if err != nil { - return nil, "", errors.Wrapf(err, "failed to listen on unix socket at address %s", socketPath) + return nil, "", fmt.Errorf("failed to listen on unix socket at address %s: %w", socketPath, err) } return listener, socketPath, nil } @@ -35,7 +36,7 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os. // Accept the listener's connection. conn, err := listener.Accept() if err != nil { - return nil, errors.Wrap(err, "failed to get terminal master file descriptor from socket") + return nil, fmt.Errorf("failed to get terminal master file descriptor from socket: %w", err) } defer conn.Close() unixConn, ok := conn.(*net.UnixConn) @@ -53,10 +54,10 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os. // sent. 
n, oobn, _, _, err := unixConn.ReadMsgUnix(name, oob) if err != nil { - return nil, errors.Wrap(err, "failed to read message from unix socket") + return nil, fmt.Errorf("failed to read message from unix socket: %w", err) } if n >= maxNameLen || oobn != oobSpace { - return nil, errors.Errorf("read an invalid number of bytes (n=%d oobn=%d)", n, oobn) + return nil, fmt.Errorf("read an invalid number of bytes (n=%d oobn=%d)", n, oobn) } // Truncate the data returned from the message. @@ -66,26 +67,26 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os. // Parse the out-of-band data in the message. messages, err := unix.ParseSocketControlMessage(oob) if err != nil { - return nil, errors.Wrapf(err, "failed to parse socket control message for oob %v", oob) + return nil, fmt.Errorf("failed to parse socket control message for oob %v: %w", oob, err) } if len(messages) == 0 { return nil, errors.New("did not receive any socket control messages") } if len(messages) > 1 { - return nil, errors.Errorf("received more than one socket control message: received %d", len(messages)) + return nil, fmt.Errorf("received more than one socket control message: received %d", len(messages)) } message := messages[0] // Parse the file descriptor out of the out-of-band data in the message. fds, err := unix.ParseUnixRights(&message) if err != nil { - return nil, errors.Wrapf(err, "failed to parse file descriptors out of message %v", message) + return nil, fmt.Errorf("failed to parse file descriptors out of message %v: %w", message, err) } if len(fds) == 0 { return nil, errors.New("did not receive any file descriptors") } if len(fds) > 1 { - return nil, errors.Errorf("received more than one file descriptor: received %d", len(fds)) + return nil, fmt.Errorf("received more than one file descriptor: received %d", len(fds)) } fd := uintptr(fds[0]) @@ -101,7 +102,7 @@ func (*runcRuntime) pathExists(pathToCheck string) (bool, error) { if os.IsNotExist(err) { return false, nil } - return false, errors.Wrapf(err, "failed call to Stat for path %s", pathToCheck) + return false, fmt.Errorf("failed call to Stat for path %s: %w", pathToCheck, err) } return true, nil } diff --git a/internal/guest/runtime/runc/runc.go b/internal/guest/runtime/runc/runc.go index 555fd17a7e..3ab8760c09 100644 --- a/internal/guest/runtime/runc/runc.go +++ b/internal/guest/runtime/runc/runc.go @@ -5,6 +5,7 @@ package runc import ( "encoding/json" + "fmt" "os" "path" "path/filepath" @@ -13,7 +14,7 @@ import ( "syscall" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "golang.org/x/sys/unix" "github.com/Microsoft/hcsshim/internal/guest/commonutils" @@ -57,7 +58,7 @@ func (r *runcRuntime) initialize() error { return err } if err := os.MkdirAll(p, 0700); err != nil { - return errors.Wrapf(err, "failed making runC container files directory %s", p) + return fmt.Errorf("failed making runC container files directory %s: %w", p, err) } } } @@ -84,11 +85,11 @@ func (*runcRuntime) ListContainerStates() ([]runtime.ContainerState, error) { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return nil, errors.Wrapf(runcErr, "runc list failed with %v: %s", err, string(out)) + return nil, fmt.Errorf("runc list failed with %v: %s: %w", err, string(out), runcErr) } var states []runtime.ContainerState if err := json.Unmarshal(out, &states); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the states for the container list") + return nil, fmt.Errorf("failed to 
unmarshal the states for the container list: %w", err) } return states, nil } @@ -100,11 +101,11 @@ func (*runcRuntime) getRunningPids(id string) ([]int, error) { out, err := cmd.CombinedOutput() if err != nil { runcErr := parseRuncError(string(out)) - return nil, errors.Wrapf(runcErr, "runc ps failed with %v: %s", err, string(out)) + return nil, fmt.Errorf("runc ps failed with %v: %s: %w", err, string(out), runcErr) } var pids []int if err := json.Unmarshal(out, &pids); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal pids for container %s", id) + return nil, fmt.Errorf("failed to unmarshal pids for container %s: %w", id, err) } return pids, nil } @@ -116,7 +117,7 @@ func (*runcRuntime) getProcessCommand(pid int) ([]string, error) { // with a null character after every argument. e.g. "ping google.com " data, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "cmdline")) if err != nil { - return nil, errors.Wrapf(err, "failed to read cmdline file for process %d", pid) + return nil, fmt.Errorf("failed to read cmdline file for process %d: %w", pid, err) } // Get rid of the \0 character at end. cmdString := strings.TrimSuffix(string(data), "\x00") @@ -139,11 +140,11 @@ func (*runcRuntime) pidMapToProcessStates(pidMap map[int]*runtime.ContainerProce func (r *runcRuntime) waitOnProcess(pid int) (int, error) { process, err := os.FindProcess(pid) if err != nil { - return -1, errors.Wrapf(err, "failed to find process %d", pid) + return -1, fmt.Errorf("failed to find process %d: %w", pid, err) } state, err := process.Wait() if err != nil { - return -1, errors.Wrapf(err, "failed waiting on process %d", pid) + return -1, fmt.Errorf("failed waiting on process %d: %w", pid, err) } status := state.Sys().(syscall.WaitStatus) @@ -212,12 +213,12 @@ func ociSpecFromBundle(bundlePath string) (*oci.Spec, error) { configPath := filepath.Join(bundlePath, "config.json") configFile, err := os.Open(configPath) if err != nil { - return nil, errors.Wrapf(err, "failed to open bundle config at %s", configPath) + return nil, fmt.Errorf("failed to open bundle config at %s: %w", configPath, err) } defer configFile.Close() var spec *oci.Spec if err := commonutils.DecodeJSONWithHresult(configFile, &spec); err != nil { - return nil, errors.Wrap(err, "failed to parse OCI spec") + return nil, fmt.Errorf("failed to parse OCI spec: %w", err) } return spec, nil } diff --git a/internal/guest/runtime/runc/utils.go b/internal/guest/runtime/runc/utils.go index 38535ccbab..ef118b831c 100644 --- a/internal/guest/runtime/runc/utils.go +++ b/internal/guest/runtime/runc/utils.go @@ -5,6 +5,8 @@ package runc import ( "encoding/json" + "errors" + "fmt" "os" "os/exec" "path/filepath" @@ -12,7 +14,6 @@ import ( "strings" "syscall" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/guest/runtime" @@ -22,11 +23,11 @@ import ( func (r *runcRuntime) readPidFile(pidFile string) (pid int, err error) { data, err := os.ReadFile(pidFile) if err != nil { - return -1, errors.Wrap(err, "failed reading from pid file") + return -1, fmt.Errorf("failed reading from pid file: %w", err) } pid, err = strconv.Atoi(string(data)) if err != nil { - return -1, errors.Wrapf(err, "failed converting pid text %q to integer form", data) + return -1, fmt.Errorf("failed converting pid text %q to integer form: %w", data, err) } return pid, nil } @@ -35,7 +36,7 @@ func (r *runcRuntime) readPidFile(pidFile string) (pid int, err error) { func (r *runcRuntime) cleanupContainer(id string) error { containerDir 
:= r.getContainerDir(id) if err := os.RemoveAll(containerDir); err != nil { - return errors.Wrapf(err, "failed removing the container directory for container %s", id) + return fmt.Errorf("failed removing the container directory for container %s: %w", id, err) } return nil } @@ -44,7 +45,7 @@ func (r *runcRuntime) cleanupContainer(id string) error { func (r *runcRuntime) cleanupProcess(id string, pid int) error { processDir := r.getProcessDir(id, pid) if err := os.RemoveAll(processDir); err != nil { - return errors.Wrapf(err, "failed removing the process directory for process %d in container %s", pid, id) + return fmt.Errorf("failed removing the process directory for process %d in container %s: %w", pid, id, err) } return nil } @@ -65,7 +66,7 @@ func (*runcRuntime) getContainerDir(id string) string { func (r *runcRuntime) makeContainerDir(id string) error { dir := r.getContainerDir(id) if err := os.MkdirAll(dir, os.ModeDir); err != nil { - return errors.Wrapf(err, "failed making container directory for container %s", id) + return fmt.Errorf("failed making container directory for container %s: %w", id, err) } return nil } @@ -79,7 +80,7 @@ func (r *runcRuntime) getLogDir(id string) string { func (r *runcRuntime) makeLogDir(id string) error { dir := r.getLogDir(id) if err := os.MkdirAll(dir, os.ModeDir); err != nil { - return errors.Wrapf(err, "failed making runc log directory for container %s", id) + return fmt.Errorf("failed making runc log directory for container %s: %w", id, err) } return nil } @@ -118,7 +119,7 @@ type standardLogEntry struct { func (l *standardLogEntry) asError() (err error) { err = parseRuncError(l.Message) if l.Err != nil { - err = errors.Wrapf(err, l.Err.Error()) + err = fmt.Errorf("%s: %w", l.Err.Error(), err) } return } diff --git a/internal/guest/stdio/connection.go b/internal/guest/stdio/connection.go index 0ab2485acd..78f25ef9ad 100644 --- a/internal/guest/stdio/connection.go +++ b/internal/guest/stdio/connection.go @@ -4,10 +4,11 @@ package stdio import ( + "fmt" "os" "github.com/Microsoft/hcsshim/internal/guest/transport" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -75,7 +76,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect if settings.StdIn != nil { c, err := tport.Dial(*settings.StdIn) if err != nil { - return nil, errors.Wrap(err, "failed creating stdin Connection") + return nil, fmt.Errorf("failed creating stdin Connection: %w", err) } connSet.In = &logConnection{ con: c, @@ -85,7 +86,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect if settings.StdOut != nil { c, err := tport.Dial(*settings.StdOut) if err != nil { - return nil, errors.Wrap(err, "failed creating stdout Connection") + return nil, fmt.Errorf("failed creating stdout Connection: %w", err) } connSet.Out = &logConnection{ con: c, @@ -95,7 +96,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect if settings.StdErr != nil { c, err := tport.Dial(*settings.StdErr) if err != nil { - return nil, errors.Wrap(err, "failed creating stderr Connection") + return nil, fmt.Errorf("failed creating stderr Connection: %w", err) } connSet.Err = &logConnection{ con: c, diff --git a/internal/guest/stdio/stdio.go b/internal/guest/stdio/stdio.go index 9352bc58fc..24b99dbba7 100644 --- a/internal/guest/stdio/stdio.go +++ b/internal/guest/stdio/stdio.go @@ -4,13 +4,15 @@ package stdio import ( + "errors" + "fmt" "io" "os" "strings" "sync" "github.com/Microsoft/hcsshim/internal/guest/transport" - 
"github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -25,19 +27,19 @@ func (s *ConnectionSet) Close() error { var err error if s.In != nil { if cerr := s.In.Close(); cerr != nil { - err = errors.Wrap(cerr, "failed Close on stdin") + err = fmt.Errorf("failed Close on stdin: %w", cerr) } s.In = nil } if s.Out != nil { if cerr := s.Out.Close(); cerr != nil && err == nil { - err = errors.Wrap(cerr, "failed Close on stdout") + err = fmt.Errorf("failed Close on stdout: %w", cerr) } s.Out = nil } if s.Err != nil { if cerr := s.Err.Close(); cerr != nil && err == nil { - err = errors.Wrap(cerr, "failed Close on stderr") + err = fmt.Errorf("failed Close on stderr: %w", cerr) } s.Err = nil } @@ -55,19 +57,19 @@ func (fs *FileSet) Close() error { var err error if fs.In != nil { if cerr := fs.In.Close(); cerr != nil { - err = errors.Wrap(cerr, "failed Close on stdin") + err = fmt.Errorf("failed Close on stdin: %w", cerr) } fs.In = nil } if fs.Out != nil { if cerr := fs.Out.Close(); cerr != nil && err == nil { - err = errors.Wrap(cerr, "failed Close on stdout") + err = fmt.Errorf("failed Close on stdout: %w", cerr) } fs.Out = nil } if fs.Err != nil { if cerr := fs.Err.Close(); cerr != nil && err == nil { - err = errors.Wrap(cerr, "failed Close on stderr") + err = fmt.Errorf("failed Close on stderr: %w", cerr) } fs.Err = nil } @@ -86,19 +88,19 @@ func (s *ConnectionSet) Files() (_ *FileSet, err error) { if s.In != nil { fs.In, err = s.In.File() if err != nil { - return nil, errors.Wrap(err, "failed to dup stdin socket for command") + return nil, fmt.Errorf("failed to dup stdin socket for command: %w", err) } } if s.Out != nil { fs.Out, err = s.Out.File() if err != nil { - return nil, errors.Wrap(err, "failed to dup stdout socket for command") + return nil, fmt.Errorf("failed to dup stdout socket for command: %w", err) } } if s.Err != nil { fs.Err, err = s.Err.File() if err != nil { - return nil, errors.Wrap(err, "failed to dup stderr socket for command") + return nil, fmt.Errorf("failed to dup stderr socket for command: %w", err) } } return fs, nil @@ -117,19 +119,19 @@ func NewPipeRelay(s *ConnectionSet) (_ *PipeRelay, err error) { if s == nil || s.In != nil { pr.pipes[0], pr.pipes[1], err = os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdin pipe relay") + return nil, fmt.Errorf("failed to create stdin pipe relay: %w", err) } } if s == nil || s.Out != nil { pr.pipes[2], pr.pipes[3], err = os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipe relay") + return nil, fmt.Errorf("failed to create stdout pipe relay: %w", err) } } if s == nil || s.Err != nil { pr.pipes[4], pr.pipes[5], err = os.Pipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stderr pipe relay") + return nil, fmt.Errorf("failed to create stderr pipe relay: %w", err) } } return pr, nil diff --git a/internal/guest/stdio/tty.go b/internal/guest/stdio/tty.go index a05a2cb15d..f65ae7a1b6 100644 --- a/internal/guest/stdio/tty.go +++ b/internal/guest/stdio/tty.go @@ -9,7 +9,6 @@ import ( "syscall" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -18,7 +17,7 @@ import ( func NewConsole() (*os.File, string, error) { master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) if err != nil { - return nil, "", errors.Wrap(err, "failed to open master pseudoterminal file") + return nil, "", fmt.Errorf("failed to open master pseudoterminal file: %w", err) } console, err := ptsname(master) if err != nil { @@ -29,10 
+28,10 @@ func NewConsole() (*os.File, string, error) { } // TODO: Do we need to keep this chmod call? if err := os.Chmod(console, 0600); err != nil { - return nil, "", errors.Wrap(err, "failed to change permissions on the slave pseudoterminal file") + return nil, "", fmt.Errorf("failed to change permissions on the slave pseudoterminal file: %w", err) } if err := os.Chown(console, 0, 0); err != nil { - return nil, "", errors.Wrap(err, "failed to change ownership on the slave pseudoterminal file") + return nil, "", fmt.Errorf("failed to change ownership on the slave pseudoterminal file: %w", err) } return master, console, nil } @@ -62,7 +61,7 @@ func ioctl(fd uintptr, flag, data uintptr) error { func ptsname(f *os.File) (string, error) { var n int32 if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { - return "", errors.Wrap(err, "ioctl TIOCGPTN failed for ptsname") + return "", fmt.Errorf("ioctl TIOCGPTN failed for ptsname: %w", err) } return fmt.Sprintf("/dev/pts/%d", n), nil } @@ -72,7 +71,7 @@ func ptsname(f *os.File) (string, error) { func unlockpt(f *os.File) error { var u int32 if err := ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != nil { - return errors.Wrap(err, "ioctl TIOCSPTLCK failed for unlockpt") + return fmt.Errorf("ioctl TIOCSPTLCK failed for unlockpt: %w", err) } return nil } diff --git a/internal/guest/storage/crypt/crypt.go b/internal/guest/storage/crypt/crypt.go index 00585f9509..133a98b61b 100644 --- a/internal/guest/storage/crypt/crypt.go +++ b/internal/guest/storage/crypt/crypt.go @@ -13,7 +13,6 @@ import ( "time" "github.com/Microsoft/hcsshim/internal/log" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -96,7 +95,8 @@ func cryptsetupFormat(ctx context.Context, source string, keyFilePath string) er // supposed to derive a strong key from it. In our case, we already pass // a strong key to cryptsetup, so we don't need a strong KDF. Ideally, // it would be bypassed completely, but this isn't possible. 
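Editor's aside, a minimal sketch and not part of this diff: the comment above explains why a cheap PBKDF is acceptable when a strong random key is already passed via --key-file, and the formatArgs assembled just below reflect that. The helper name, binary path, and the placeholder device/key paths here are assumptions for illustration only; the real cryptsetupCommand helper is not shown in this hunk.

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runCryptsetup shells out to cryptsetup and folds its output into the returned error,
// mirroring the shape of the helper the diff calls cryptsetupCommand.
func runCryptsetup(ctx context.Context, args []string) error {
	out, err := exec.CommandContext(ctx, "cryptsetup", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("cryptsetup %v failed: %s: %w", args, string(out), err)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// A strong random key is supplied via --key-file, so forcing pbkdf2 with a low
	// iteration count (as in the hunk below) does not weaken the result.
	formatArgs := []string{
		"--batch-mode",
		"luksFormat", "/dev/sdX", "--key-file", "/tmp/keyfile", // placeholder paths
		"--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000",
	}
	if err := runCryptsetup(ctx, formatArgs); err != nil {
		fmt.Println(err)
	}
}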
- "--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000"} + "--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000", + } return cryptsetupCommand(ctx, formatArgs) } @@ -107,7 +107,8 @@ func cryptsetupOpen(ctx context.Context, source string, deviceName string, keyFi // Open device with the key passed to luksFormat "luksOpen", source, deviceName, "--key-file", keyFilePath, // Don't use a journal to increase performance - "--integrity-no-journal", "--persistent"} + "--integrity-no-journal", "--persistent", + } return cryptsetupCommand(ctx, openArgs) } @@ -148,7 +149,7 @@ func EncryptDevice(ctx context.Context, source string, dmCryptName string) (path // Create temporary directory to store the keyfile and xfs image tempDir, err := _osMkdirTemp("", "dm-crypt") if err != nil { - return "", errors.Wrapf(err, "failed to create temporary folder: %s", source) + return "", fmt.Errorf("failed to create temporary folder: %w", err) } defer func() { diff --git a/internal/guest/storage/crypt/crypt_test.go b/internal/guest/storage/crypt/crypt_test.go index b3db41d8a0..4049e3bd44 100644 --- a/internal/guest/storage/crypt/crypt_test.go +++ b/internal/guest/storage/crypt/crypt_test.go @@ -5,9 +5,8 @@ package crypt import ( "context" + "errors" "testing" - - "github.com/pkg/errors" ) const tempDir = "/tmp/dir/" @@ -47,7 +46,7 @@ func Test_Encrypt_Generate_Key_Error(t *testing.T) { } _, err := EncryptDevice(context.Background(), source, "dm-crypt-target") - if errors.Unwrap(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err) } } @@ -80,7 +79,7 @@ func Test_Encrypt_Cryptsetup_Format_Error(t *testing.T) { } _, err := EncryptDevice(context.Background(), expectedSource, "dm-crypt-target") - if errors.Unwrap(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: '%v' got: '%v", expectedErr, err) } } @@ -120,7 +119,7 @@ func Test_Encrypt_Cryptsetup_Open_Error(t *testing.T) { } _, err := EncryptDevice(context.Background(), expectedSource, dmCryptName) - if errors.Unwrap(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err) } } @@ -175,7 +174,7 @@ func Test_Cleanup_Dm_Crypt_Error(t *testing.T) { } err := CleanupCryptDevice(context.TODO(), dmCryptName) - if errors.Unwrap(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err) } } diff --git a/internal/guest/storage/mount.go b/internal/guest/storage/mount.go index a3d10a3b25..a0f91644f9 100644 --- a/internal/guest/storage/mount.go +++ b/internal/guest/storage/mount.go @@ -6,21 +6,22 @@ package storage import ( "bufio" "context" - gerrors "errors" + "errors" "fmt" "os" "strings" "syscall" - "github.com/pkg/errors" "go.opencensus.io/trace" "golang.org/x/sys/unix" "github.com/Microsoft/hcsshim/internal/oc" ) -const procMountFile = "/proc/mounts" -const numProcMountFields = 6 +const ( + procMountFile = "/proc/mounts" + numProcMountFields = 6 +) // Test dependencies var ( @@ -128,14 +129,14 @@ func UnmountPath(ctx context.Context, target string, removeTarget bool) (err err if os.IsNotExist(err) { return nil } - return errors.Wrapf(err, "failed to determine if path '%s' exists", target) + return fmt.Errorf("failed to determine if path %q exists: %w", target, err) } if err := unixUnmount(target, 0); err != nil { // If `Unmount` returns `EINVAL` it's not mounted. Just delete the // folder. 
- if !gerrors.Is(err, unix.EINVAL) { - return errors.Wrapf(err, "failed to unmount path '%s'", target) + if !errors.Is(err, unix.EINVAL) { + return fmt.Errorf("failed to unmount path %q: %w", target, err) } } if removeTarget { diff --git a/internal/guest/storage/mount_test.go b/internal/guest/storage/mount_test.go index 22d4773b27..1414b660fe 100644 --- a/internal/guest/storage/mount_test.go +++ b/internal/guest/storage/mount_test.go @@ -5,10 +5,11 @@ package storage import ( "context" + "errors" + "fmt" "os" "testing" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func Test_Unmount_Stat_OtherError_Error(t *testing.T) { return nil, expectedErr } err := UnmountPath(context.Background(), "/dev/fake", false) - if errors.Cause(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } } @@ -129,7 +130,7 @@ func Test_Unmount_OtherError(t *testing.T) { return expectedErr } err := UnmountPath(context.Background(), "/dev/fake", false) - if errors.Cause(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } } @@ -195,7 +196,7 @@ func Test_UnmountAllInPath_Unmount_Order(t *testing.T) { timesCalled := 0 unixUnmount = func(target string, flags int) error { if timesCalled == 0 && target != child { - return errors.Errorf("expected to unmount %v first, got %v", child, target) + return fmt.Errorf("expected to unmount %v first, got %v", child, target) } timesCalled += 1 return nil @@ -206,7 +207,6 @@ func Test_UnmountAllInPath_Unmount_Order(t *testing.T) { } err := UnmountAllInPath(context.Background(), parent, true) - if err != nil { t.Fatalf("expected nil error, got: %v", err) } diff --git a/internal/guest/storage/overlay/overlay.go b/internal/guest/storage/overlay/overlay.go index aa4877508f..96f97acb17 100644 --- a/internal/guest/storage/overlay/overlay.go +++ b/internal/guest/storage/overlay/overlay.go @@ -5,6 +5,7 @@ package overlay import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -13,7 +14,7 @@ import ( "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/memory" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" "golang.org/x/sys/unix" @@ -108,13 +109,13 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t } if readonly && (upperdirPath != "" || workdirPath != "") { - return errors.Errorf("upperdirPath: %q, and workdirPath: %q must be empty when readonly==true", upperdirPath, workdirPath) + return fmt.Errorf("upperdirPath: %q, and workdirPath: %q must be empty when readonly==true", upperdirPath, workdirPath) } options := []string{"lowerdir=" + lowerdir} if upperdirPath != "" { if err := osMkdirAll(upperdirPath, 0755); err != nil { - return errors.Wrap(err, "failed to create upper directory in scratch space") + return fmt.Errorf("failed to create upper directory in scratch space: %w", err) } defer func() { if err != nil { @@ -125,7 +126,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t } if workdirPath != "" { if err := osMkdirAll(workdirPath, 0755); err != nil { - return errors.Wrap(err, "failed to create workdir in scratch space") + return fmt.Errorf("failed to create workdir in scratch space: %w", err) } defer func() { if err != nil { @@ -135,7 +136,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t options 
= append(options, "workdir="+workdirPath) } if err := osMkdirAll(target, 0755); err != nil { - return errors.Wrapf(err, "failed to create directory for container root filesystem %s", target) + return fmt.Errorf("failed to create directory for container root filesystem %s: %w", target, err) } defer func() { if err != nil { @@ -147,7 +148,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t flags |= unix.MS_RDONLY } if err := unixMount("overlay", target, "overlay", flags, strings.Join(options, ",")); err != nil { - return errors.Wrapf(err, "failed to mount overlayfs at %s", target) + return fmt.Errorf("failed to mount overlayfs at %s: %w", target, err) } return nil } diff --git a/internal/guest/storage/plan9/plan9.go b/internal/guest/storage/plan9/plan9.go index 5c1f1d74f4..3df4bb2967 100644 --- a/internal/guest/storage/plan9/plan9.go +++ b/internal/guest/storage/plan9/plan9.go @@ -11,7 +11,6 @@ import ( "github.com/Microsoft/hcsshim/internal/guest/transport" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" "go.opencensus.io/trace" "golang.org/x/sys/unix" ) @@ -40,7 +39,7 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string, trace.Int64Attribute("port", int64(port)), trace.BoolAttribute("readonly", readonly)) - if err := osMkdirAll(target, 0700); err != nil { + if err := osMkdirAll(target, 0o700); err != nil { return err } defer func() { @@ -50,12 +49,12 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string, }() conn, err := vsock.Dial(port) if err != nil { - return errors.Wrapf(err, "could not connect to plan9 server for %s", target) + return fmt.Errorf("could not connect to plan9 server for %s: %w", target, err) } f, err := conn.File() conn.Close() if err != nil { - return errors.Wrapf(err, "could not get file for plan9 connection for %s", target) + return fmt.Errorf("could not get file for plan9 connection for %s: %w", target, err) } defer f.Close() @@ -72,14 +71,14 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string, // set socket options to maximize bandwidth err = syscall.SetsockoptInt(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF, packetPayloadBytes) if err != nil { - return errors.Wrapf(err, "failed to set sock option syscall.SO_RCVBUF to %v on fd %v", packetPayloadBytes, f.Fd()) + return fmt.Errorf("failed to set sock option syscall.SO_RCVBUF to %v on fd %v: %w", packetPayloadBytes, f.Fd(), err) } err = syscall.SetsockoptInt(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_SNDBUF, packetPayloadBytes) if err != nil { - return errors.Wrapf(err, "failed to set sock option syscall.SO_SNDBUF to %v on fd %v", packetPayloadBytes, f.Fd()) + return fmt.Errorf("failed to set sock option syscall.SO_SNDBUF to %v on fd %v: %w", packetPayloadBytes, f.Fd(), err) } if err := unixMount(target, target, "9p", mountOptions, data); err != nil { - return errors.Wrapf(err, "failed to mount directory for mapped directory %s", target) + return fmt.Errorf("failed to mount directory for mapped directory %s: %w", target, err) } return nil } diff --git a/internal/guest/storage/pmem/pmem.go b/internal/guest/storage/pmem/pmem.go index 52bf4fec88..a701988590 100644 --- a/internal/guest/storage/pmem/pmem.go +++ b/internal/guest/storage/pmem/pmem.go @@ -8,7 +8,6 @@ import ( "fmt" "os" - "github.com/pkg/errors" "go.opencensus.io/trace" "golang.org/x/sys/unix" @@ -38,7 +37,7 @@ const ( // mount mounts source to target via unix.Mount func mount(ctx context.Context, source, 
target string) (err error) { - if err := osMkdirAll(target, 0700); err != nil { + if err := osMkdirAll(target, 0o700); err != nil { return err } defer func() { @@ -51,7 +50,7 @@ func mount(ctx context.Context, source, target string) (err error) { flags := uintptr(unix.MS_RDONLY) if err := unixMount(source, target, "ext4", flags, "noload"); err != nil { - return errors.Wrapf(err, "failed to mount %s onto %s", source, target) + return fmt.Errorf("failed to mount %s onto %s: %w", source, target, err) } return nil } @@ -141,7 +140,7 @@ func Unmount( trace.StringAttribute("target", target)) if err := storage.UnmountPath(ctx, target, true); err != nil { - return errors.Wrapf(err, "failed to unmount target: %s", target) + return fmt.Errorf("failed to unmount target %s: %w", target, err) } if verityInfo != nil { diff --git a/internal/guest/storage/pmem/pmem_test.go b/internal/guest/storage/pmem/pmem_test.go index 4254d58950..0cc4127a4b 100644 --- a/internal/guest/storage/pmem/pmem_test.go +++ b/internal/guest/storage/pmem/pmem_test.go @@ -5,11 +5,11 @@ package pmem import ( "context" + "errors" "fmt" "os" "testing" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" @@ -33,7 +33,7 @@ func Test_Mount_Mkdir_Fails_Error(t *testing.T) { return expectedErr } err := Mount(context.Background(), 0, "", nil, nil) - if errors.Cause(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } } @@ -70,8 +70,8 @@ func Test_Mount_Mkdir_ExpectedPerm(t *testing.T) { target := "/fake/path" osMkdirAll = func(path string, perm os.FileMode) error { - if perm != os.FileMode(0700) { - t.Errorf("expected perm: %v, got: %v", os.FileMode(0700), perm) + if perm != os.FileMode(0o700) { + t.Errorf("expected perm: %v, got: %v", os.FileMode(0o700), perm) return errors.New("unexpected perm") } return nil @@ -108,7 +108,7 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) { return expectedErr } err := Mount(context.Background(), 0, target, nil, nil) - if errors.Cause(err) != expectedErr { //nolint:errorlint + if !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } if !removeAllCalled { diff --git a/internal/guest/storage/scsi/scsi.go b/internal/guest/storage/scsi/scsi.go index d063bd971a..8d800a2922 100644 --- a/internal/guest/storage/scsi/scsi.go +++ b/internal/guest/storage/scsi/scsi.go @@ -5,6 +5,7 @@ package scsi import ( "context" + "errors" "fmt" "io/fs" "os" @@ -14,7 +15,6 @@ import ( "strings" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.opencensus.io/trace" "golang.org/x/sys/unix" @@ -134,7 +134,8 @@ func Mount( target string, readonly bool, options []string, - config *Config) (err error) { + config *Config, +) (err error) { spnCtx, span := oc.StartSpan(ctx, "scsi::Mount") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() @@ -170,7 +171,7 @@ func Mount( // create and symlink block device mount target if config.BlockDev { parent := filepath.Dir(target) - if err := osMkdirAll(parent, 0700); err != nil { + if err := osMkdirAll(parent, 0o700); err != nil { return err } log.G(ctx).WithFields(logrus.Fields{ @@ -180,7 +181,7 @@ func Mount( return osSymlink(source, target) } - if err := osMkdirAll(target, 0700); err != nil { + if err := osMkdirAll(target, 0o700); err != nil { return err } defer func() { @@ -308,7 +309,7 @@ func Unmount( // unmount target if err := storageUnmountPath(ctx, target, true); err != 
nil { - return errors.Wrapf(err, "unmount failed: %s", target) + return fmt.Errorf("unmount %q failed: %w", target, err) } if config.VerityInfo != nil { @@ -366,7 +367,7 @@ func GetDevicePath(ctx context.Context, controller, lun uint8, partition uint64) } if len(deviceNames) > 1 { - return "", errors.Errorf("more than one block device could match SCSI ID \"%s\"", scsiID) + return "", fmt.Errorf("more than one block device could match SCSI ID %q", scsiID) } deviceName := deviceNames[0].Name() @@ -442,7 +443,7 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { trace.Int64Attribute("lun", int64(lun))) scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) - f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0644) + f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0o644) if err != nil { if os.IsNotExist(err) { return nil diff --git a/internal/guest/storage/utilities.go b/internal/guest/storage/utilities.go index 2cbef13f08..2727ff0afa 100644 --- a/internal/guest/storage/utilities.go +++ b/internal/guest/storage/utilities.go @@ -8,8 +8,6 @@ import ( "fmt" "path/filepath" "time" - - "github.com/pkg/errors" ) // export this variable so it can be mocked to aid in testing for consuming packages @@ -26,13 +24,16 @@ func WaitForFileMatchingPattern(ctx context.Context, pattern string) (string, er if len(files) == 0 { select { case <-ctx.Done(): - return "", errors.Wrapf(ctx.Err(), "timed out waiting for file matching pattern %s to exist", pattern) + if err := ctx.Err(); err != nil { + return "", fmt.Errorf("timed out waiting for file matching pattern %s to exist: %w", pattern, err) + } + return "", nil default: time.Sleep(time.Millisecond * 10) continue } } else if len(files) > 1 { - return "", fmt.Errorf("more than one file could exist for pattern \"%s\"", pattern) + return "", fmt.Errorf("more than one file could exist for pattern %q", pattern) } return files[0], nil } diff --git a/internal/guest/transport/vsock.go b/internal/guest/transport/vsock.go index a95095f335..4a9ad4b837 100644 --- a/internal/guest/transport/vsock.go +++ b/internal/guest/transport/vsock.go @@ -4,13 +4,12 @@ package transport import ( - gerrors "errors" + "errors" "fmt" "syscall" "time" "github.com/linuxkit/virtsock/pkg/vsock" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -36,12 +35,11 @@ func (t *VsockTransport) Dial(port uint32) (Connection, error) { return conn, nil } // If the error was ETIMEDOUT retry, otherwise fail. - var errno syscall.Errno - if gerrors.As(err, &errno) && errno == syscall.ETIMEDOUT { + if errors.Is(err, syscall.ETIMEDOUT) { time.Sleep(100 * time.Millisecond) continue } else { - return nil, errors.Wrapf(err, "vsock Dial port (%d) failed", port) + return nil, fmt.Errorf("vsock Dial port (%d) failed: %w", port, err) } } return nil, fmt.Errorf("failed connecting the VsockConnection: can't connect after 10 attempts") diff --git a/internal/hcs/schema2/com_port.go b/internal/hcs/schema2/com_port.go index 8bf8cab60e..e46cae4364 100644 --- a/internal/hcs/schema2/com_port.go +++ b/internal/hcs/schema2/com_port.go @@ -9,7 +9,7 @@ package hcsschema -// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. 
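Editor's aside, an illustrative sketch only: the VsockTransport.Dial change above drops the manual errors.As/Errno comparison because errors.Is matches syscall.ETIMEDOUT even through %w wrapping. The dial callback, retry count, and sleep interval below are placeholders mirroring that retry shape, not the real transport.

package main

import (
	"errors"
	"fmt"
	"syscall"
	"time"
)

// dialWithRetry retries transient timeouts and fails fast on anything else.
func dialWithRetry(dial func() error) error {
	var err error
	for i := 0; i < 10; i++ {
		if err = dial(); err == nil {
			return nil
		}
		if errors.Is(err, syscall.ETIMEDOUT) {
			time.Sleep(100 * time.Millisecond)
			continue // transient timeout: retry
		}
		return fmt.Errorf("dial failed: %w", err) // any other error is fatal
	}
	return fmt.Errorf("giving up after 10 attempts: %w", err)
}

func main() {
	attempts := 0
	err := dialWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("vsock connect: %w", syscall.ETIMEDOUT)
		}
		return nil
	})
	fmt.Println(err, attempts) // <nil> 3
}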
type ComPort struct { NamedPipe string `json:"NamedPipe,omitempty"` diff --git a/internal/hcs/schema2/container_memory_information.go b/internal/hcs/schema2/container_memory_information.go index 1fd7ca5d56..a5384ff353 100644 --- a/internal/hcs/schema2/container_memory_information.go +++ b/internal/hcs/schema2/container_memory_information.go @@ -9,7 +9,7 @@ package hcsschema -// memory usage as viewed from within the container +// memory usage as viewed from within the container type ContainerMemoryInformation struct { TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` diff --git a/internal/hcs/schema2/guest_connection_info.go b/internal/hcs/schema2/guest_connection_info.go index 8a369bab71..4a3851e95e 100644 --- a/internal/hcs/schema2/guest_connection_info.go +++ b/internal/hcs/schema2/guest_connection_info.go @@ -9,7 +9,7 @@ package hcsschema -// Information about the guest. +// Information about the guest. type GuestConnectionInfo struct { // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. diff --git a/internal/hcs/schema2/hv_socket_2.go b/internal/hcs/schema2/hv_socket_2.go index a017691f02..3f8aea5dd6 100644 --- a/internal/hcs/schema2/hv_socket_2.go +++ b/internal/hcs/schema2/hv_socket_2.go @@ -9,7 +9,7 @@ package hcsschema -// HvSocket configuration for a VM +// HvSocket configuration for a VM type HvSocket2 struct { HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` } diff --git a/internal/hcs/schema2/hv_socket_address.go b/internal/hcs/schema2/hv_socket_address.go index 84c11b93ee..9ae843ae0a 100644 --- a/internal/hcs/schema2/hv_socket_address.go +++ b/internal/hcs/schema2/hv_socket_address.go @@ -9,8 +9,8 @@ package hcsschema -// This class defines address settings applied to a VM -// by the GCS every time a VM starts or restores. +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. type HvSocketAddress struct { LocalAddress string `json:"LocalAddress,omitempty"` ParentAddress string `json:"ParentAddress,omitempty"` diff --git a/internal/hcs/schema2/hv_socket_system_config.go b/internal/hcs/schema2/hv_socket_system_config.go index 69f4f9d39b..b1ee1d4a3b 100644 --- a/internal/hcs/schema2/hv_socket_system_config.go +++ b/internal/hcs/schema2/hv_socket_system_config.go @@ -9,7 +9,7 @@ package hcsschema -// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. type HvSocketSystemConfig struct { // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). 
diff --git a/internal/hcs/schema2/memory_stats.go b/internal/hcs/schema2/memory_stats.go index 906ba597f9..3065f21aca 100644 --- a/internal/hcs/schema2/memory_stats.go +++ b/internal/hcs/schema2/memory_stats.go @@ -9,7 +9,7 @@ package hcsschema -// Memory runtime statistics +// Memory runtime statistics type MemoryStats struct { MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` diff --git a/internal/hcs/schema2/numa_setting.go b/internal/hcs/schema2/numa_setting.go index 3f27b2ca01..c77b940704 100644 --- a/internal/hcs/schema2/numa_setting.go +++ b/internal/hcs/schema2/numa_setting.go @@ -12,10 +12,10 @@ package hcsschema type NumaSetting struct { - VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"` - PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"` - VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"` - CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"` - CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"` - MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"` + VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"` + PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"` + VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"` + CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"` + CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"` + MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"` } diff --git a/internal/hcs/schema2/pause_notification.go b/internal/hcs/schema2/pause_notification.go index d96c9501f3..a9769c9345 100644 --- a/internal/hcs/schema2/pause_notification.go +++ b/internal/hcs/schema2/pause_notification.go @@ -9,7 +9,7 @@ package hcsschema -// Notification data that is indicated to components running in the Virtual Machine. +// Notification data that is indicated to components running in the Virtual Machine. 
type PauseNotification struct { Reason string `json:"Reason,omitempty"` } diff --git a/internal/hcs/schema2/pause_options.go b/internal/hcs/schema2/pause_options.go index 21707a88eb..d48e043421 100644 --- a/internal/hcs/schema2/pause_options.go +++ b/internal/hcs/schema2/pause_options.go @@ -9,7 +9,7 @@ package hcsschema -// Options for HcsPauseComputeSystem +// Options for HcsPauseComputeSystem type PauseOptions struct { SuspensionLevel string `json:"SuspensionLevel,omitempty"` diff --git a/internal/hcs/schema2/process_details.go b/internal/hcs/schema2/process_details.go index e9a662dd59..de56374b29 100644 --- a/internal/hcs/schema2/process_details.go +++ b/internal/hcs/schema2/process_details.go @@ -13,7 +13,7 @@ import ( "time" ) -// Information about a process running in a container +// Information about a process running in a container type ProcessDetails struct { ProcessId int32 `json:"ProcessId,omitempty"` diff --git a/internal/hcs/schema2/processor_stats.go b/internal/hcs/schema2/processor_stats.go index 6157e25225..db42a19ea6 100644 --- a/internal/hcs/schema2/processor_stats.go +++ b/internal/hcs/schema2/processor_stats.go @@ -9,7 +9,7 @@ package hcsschema -// CPU runtime statistics +// CPU runtime statistics type ProcessorStats struct { TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` diff --git a/internal/hcs/schema2/property_query.go b/internal/hcs/schema2/property_query.go index d6d80df131..c7ebd66092 100644 --- a/internal/hcs/schema2/property_query.go +++ b/internal/hcs/schema2/property_query.go @@ -9,7 +9,7 @@ package hcsschema -// By default the basic properties will be returned. This query provides a way to request specific properties. +// By default the basic properties will be returned. This query provides a way to request specific properties. 
type PropertyQuery struct { PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` } diff --git a/internal/hcs/schema2/silo_properties.go b/internal/hcs/schema2/silo_properties.go index 5eaf6a7f4a..eb7338c650 100644 --- a/internal/hcs/schema2/silo_properties.go +++ b/internal/hcs/schema2/silo_properties.go @@ -9,7 +9,7 @@ package hcsschema -// Silo job information +// Silo job information type SiloProperties struct { Enabled bool `json:"Enabled,omitempty"` diff --git a/internal/hcs/schema2/statistics.go b/internal/hcs/schema2/statistics.go index ba7a6b3963..6e2dce6f42 100644 --- a/internal/hcs/schema2/statistics.go +++ b/internal/hcs/schema2/statistics.go @@ -13,7 +13,7 @@ import ( "time" ) -// Runtime statistics for a container +// Runtime statistics for a container type Statistics struct { Timestamp time.Time `json:"Timestamp,omitempty"` diff --git a/internal/hcs/schema2/storage_stats.go b/internal/hcs/schema2/storage_stats.go index 4f042ffd93..36f190aec8 100644 --- a/internal/hcs/schema2/storage_stats.go +++ b/internal/hcs/schema2/storage_stats.go @@ -9,7 +9,7 @@ package hcsschema -// Storage runtime statistics +// Storage runtime statistics type StorageStats struct { ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` diff --git a/internal/hcs/schema2/topology.go b/internal/hcs/schema2/topology.go index 9cca85171e..a4dc866eb9 100644 --- a/internal/hcs/schema2/topology.go +++ b/internal/hcs/schema2/topology.go @@ -12,7 +12,7 @@ package hcsschema type Topology struct { - Memory *VirtualMachineMemory `json:"Memory,omitempty"` - Processor *VirtualMachineProcessor `json:"Processor,omitempty"` - Numa *Numa `json:"Numa,omitempty"` + Memory *VirtualMachineMemory `json:"Memory,omitempty"` + Processor *VirtualMachineProcessor `json:"Processor,omitempty"` + Numa *Numa `json:"Numa,omitempty"` } diff --git a/internal/hcs/schema2/virtual_machine.go b/internal/hcs/schema2/virtual_machine.go index 3f750466f8..0b66870ec6 100644 --- a/internal/hcs/schema2/virtual_machine.go +++ b/internal/hcs/schema2/virtual_machine.go @@ -15,15 +15,15 @@ package hcsschema type VirtualMachine struct { Version *Version `json:"Version,omitempty"` // When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state. 
- StopOnReset bool `json:"StopOnReset,omitempty"` - Chipset *Chipset `json:"Chipset,omitempty"` - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - Devices *Devices `json:"Devices,omitempty"` - GuestState *GuestState `json:"GuestState,omitempty"` - RestoreState *RestoreState `json:"RestoreState,omitempty"` - RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` - GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` - SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` + StopOnReset bool `json:"StopOnReset,omitempty"` + Chipset *Chipset `json:"Chipset,omitempty"` + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + Devices *Devices `json:"Devices,omitempty"` + GuestState *GuestState `json:"GuestState,omitempty"` + RestoreState *RestoreState `json:"RestoreState,omitempty"` + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` } diff --git a/internal/hcs/schema2/virtual_machine_memory.go b/internal/hcs/schema2/virtual_machine_memory.go index 17573c92a5..d11fe37fde 100644 --- a/internal/hcs/schema2/virtual_machine_memory.go +++ b/internal/hcs/schema2/virtual_machine_memory.go @@ -15,7 +15,7 @@ type VirtualMachineMemory struct { SizeInMB uint64 `json:"SizeInMB,omitempty"` Backing *MemoryBackingType `json:"Backing,omitempty"` // If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory. - AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` // If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set. (if supported by the guest operating system). EnableHotHint bool `json:"EnableHotHint,omitempty"` // If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system). 
@@ -27,7 +27,7 @@ type VirtualMachineMemory struct { // Low MMIO region allocated below 4GB LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` // High MMIO region allocated above 4GB (base and size) - HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` - HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` - SlitType *VirtualSlitType `json:"SlitType,omitempty"` + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` + SlitType *VirtualSlitType `json:"SlitType,omitempty"` } diff --git a/internal/hcs/schema2/virtual_machine_processor.go b/internal/hcs/schema2/virtual_machine_processor.go index 619cd83400..ea1bd731fe 100644 --- a/internal/hcs/schema2/virtual_machine_processor.go +++ b/internal/hcs/schema2/virtual_machine_processor.go @@ -12,10 +12,10 @@ package hcsschema type VirtualMachineProcessor struct { - Count uint32 `json:"Count,omitempty"` - Limit uint64 `json:"Limit,omitempty"` - Weight uint64 `json:"Weight,omitempty"` - Reservation uint64 `json:"Reservation,omitempty"` - CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` - NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"` + Count uint32 `json:"Count,omitempty"` + Limit uint64 `json:"Limit,omitempty"` + Weight uint64 `json:"Weight,omitempty"` + Reservation uint64 `json:"Reservation,omitempty"` + CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` + NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"` } diff --git a/internal/hcs/schema2/virtual_pci_device.go b/internal/hcs/schema2/virtual_pci_device.go index a4a62da163..202338904f 100644 --- a/internal/hcs/schema2/virtual_pci_device.go +++ b/internal/hcs/schema2/virtual_pci_device.go @@ -12,6 +12,6 @@ package hcsschema // TODO: PropagateNumaAffinity is pre-release/experimental field in schema 2.11. Need to add build number // docs when a public build with this is out. type VirtualPciDevice struct { - Functions []VirtualPciFunction `json:",omitempty"` - PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"` + Functions []VirtualPciFunction `json:",omitempty"` + PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"` } diff --git a/internal/hcs/utils.go b/internal/hcs/utils.go index 76eb2be7cf..d8a0a43d1a 100644 --- a/internal/hcs/utils.go +++ b/internal/hcs/utils.go @@ -4,13 +4,13 @@ package hcs import ( "context" + "fmt" "io" "syscall" "github.com/Microsoft/go-winio" diskutil "github.com/Microsoft/go-winio/vhd" "github.com/Microsoft/hcsshim/computestorage" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -42,22 +42,22 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { // CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { - return errors.Wrap(err, "failed to create VHD") + return fmt.Errorf("failed to create VHD: %w", err) } vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) if err != nil { - return errors.Wrap(err, "failed to open VHD") + return fmt.Errorf("failed to open VHD: %w", err) } defer func() { err2 := windows.CloseHandle(windows.Handle(vhd)) if err == nil { - err = errors.Wrap(err2, "failed to close VHD") + err = fmt.Errorf("failed to close VHD: %w", err2) } }() if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { - return errors.Wrap(err, "failed to format VHD") + return fmt.Errorf("failed to format VHD: %w", err) } return nil diff --git a/internal/hcsoci/devices.go b/internal/hcsoci/devices.go index cf6f45c273..2efa7982ea 100644 --- a/internal/hcsoci/devices.go +++ b/internal/hcsoci/devices.go @@ -10,7 +10,6 @@ import ( "path/filepath" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/internal/devices" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" @@ -30,7 +29,7 @@ func getSpecKernelDrivers(annots map[string]string) ([]string, error) { drivers := oci.ParseAnnotationCommaSeparated(annotations.VirtualMachineKernelDrivers, annots) for _, driver := range drivers { if _, err := os.Stat(driver); err != nil { - return nil, errors.Wrapf(err, "failed to find path to drivers at %s", driver) + return nil, fmt.Errorf("failed to find path to drivers at %s: %w", driver, err) } } return drivers, nil @@ -42,7 +41,7 @@ func getDeviceExtensionPaths(annots map[string]string) ([]string, error) { extensions := oci.ParseAnnotationCommaSeparated(annotations.DeviceExtensions, annots) for _, ext := range extensions { if _, err := os.Stat(ext); err != nil { - return nil, errors.Wrapf(err, "failed to find path to driver extensions at %s", ext) + return nil, fmt.Errorf("failed to find path to driver extensions at %s: %w", ext, err) } } return extensions, nil @@ -80,11 +79,11 @@ func getDeviceExtensions(annotations map[string]string) (*hcsschema.ContainerDef for _, extensionPath := range extensionPaths { data, err := os.ReadFile(extensionPath) if err != nil { - return nil, errors.Wrapf(err, "failed to read extension file at %s", extensionPath) + return nil, fmt.Errorf("failed to read extension file at %s: %w", extensionPath, err) } extension := hcsschema.DeviceExtension{} if err := json.Unmarshal(data, &extension); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal extension file at %s", extensionPath) + return nil, fmt.Errorf("failed to unmarshal extension file at %s: %w", extensionPath, err) } results.DeviceExtension = append(results.DeviceExtension, extension) } @@ -178,7 +177,7 @@ func handleAssignedDevicesLCOW( pciID, index := devices.GetDeviceInfoFromPath(d.ID) vpci, err := vm.AssignDevice(ctx, pciID, index, "") if err != nil { - return resultDevs, closers, errors.Wrapf(err, "failed to assign device %s, function %d to pod %s", pciID, index, vm.ID()) + return resultDevs, closers, fmt.Errorf("failed to assign device %s, function %d to pod %s: %w", pciID, index, vm.ID(), err) } closers = append(closers, vpci) @@ -187,7 +186,7 @@ func handleAssignedDevicesLCOW( d.ID = vpci.VMBusGUID resultDevs = append(resultDevs, d) } else { - return resultDevs, closers, errors.Errorf("specified device %s 
has unsupported type %s", d.ID, d.IDType) + return resultDevs, closers, fmt.Errorf("specified device %s has unsupported type %s", d.ID, d.IDType) } } diff --git a/internal/hcsoci/resources_lcow.go b/internal/hcsoci/resources_lcow.go index 633bde02eb..4ca92122a2 100644 --- a/internal/hcsoci/resources_lcow.go +++ b/internal/hcsoci/resources_lcow.go @@ -7,6 +7,7 @@ package hcsoci import ( "context" + "errors" "fmt" "os" "path" @@ -14,7 +15,6 @@ import ( "strings" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/internal/guestpath" "github.com/Microsoft/hcsshim/internal/layers" @@ -32,7 +32,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage") rootPath, scratchPath, closer, err := layers.MountLCOWLayers(ctx, coi.actualID, coi.LCOWLayers, containerRootInUVM, coi.HostingSystem) if err != nil { - return errors.Wrap(err, "failed to mount container storage") + return fmt.Errorf("failed to mount container storage: %w", err) } coi.Spec.Root.Path = rootPath // If this is the pause container in a hypervisor-isolated pod, we can skip cleanup of @@ -48,7 +48,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * uvmPathForContainersFileSystem := path.Join(r.ContainerRootInUVM(), guestpath.RootfsPath) share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly, false, nil) if err != nil { - return errors.Wrap(err, "adding plan9 root") + return fmt.Errorf("adding plan9 root: %w", err) } coi.Spec.Root.Path = uvmPathForContainersFileSystem r.Add(share) @@ -96,7 +96,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * &scsi.MountConfig{Options: mount.Options, BlockDev: isBlockDev}, ) if err != nil { - return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) + return fmt.Errorf("adding SCSI physical disk mount %+v: %w", mount, err) } uvmPathForFile = scsiMount.GuestPath() r.Add(scsiMount) @@ -119,7 +119,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * &scsi.MountConfig{Options: mount.Options, BlockDev: isBlockDev}, ) if err != nil { - return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) + return fmt.Errorf("adding SCSI virtual disk mount %+v: %w", mount, err) } uvmPathForFile = scsiMount.GuestPath() @@ -137,7 +137,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // currently we only support 2M hugepage size hugePageSubDirs := strings.Split(strings.TrimPrefix(mount.Source, guestpath.HugePagesMountPrefix), "/") if len(hugePageSubDirs) < 2 { - return errors.Errorf( + return fmt.Errorf( `%s mount path is invalid, expected format: %s/`, mount.Source, guestpath.HugePagesMountPrefix, @@ -146,14 +146,14 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * // hugepages:// should be followed by pagesize if hugePageSubDirs[0] != "2M" { - return errors.Errorf(`only 2M (megabytes) pagesize is supported, got %s`, hugePageSubDirs[0]) + return fmt.Errorf(`only 2M (megabytes) pagesize is supported, got %s`, hugePageSubDirs[0]) } // Hugepages inside a container are backed by a mount created inside a UVM. 
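Editor's aside, a stand-in sketch rather than the VHD code itself: pkg/errors.Wrap returned nil when the wrapped error was nil, but fmt.Errorf always returns a non-nil error, so deferred blocks of the form "if err == nil { err = fmt.Errorf(..., closeErr) }" (as in the CreateNTFSVHD defer above) generally want an explicit closeErr != nil guard after this kind of migration. The file-based helper below illustrates the guarded pattern with stdlib types only.

package main

import (
	"fmt"
	"os"
)

func withFile(path string, use func(*os.File) error) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open %s: %w", path, err)
	}
	defer func() {
		// Only report the close failure if it actually happened and nothing else failed first.
		if cerr := f.Close(); cerr != nil && err == nil {
			err = fmt.Errorf("failed to close %s: %w", path, cerr)
		}
	}()
	return use(f)
}

func main() {
	err := withFile(os.DevNull, func(f *os.File) error { return nil })
	fmt.Println(err) // <nil>: a successful Close is not turned into an error
}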
uvmPathForFile = mount.Source } else { st, err := os.Stat(hostPath) if err != nil { - return errors.Wrap(err, "could not open bind mount target") + return fmt.Errorf("could not open bind mount target: %w", err) } restrictAccess := false var allowedNames []string @@ -170,7 +170,7 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * share, err := coi.HostingSystem.AddPlan9(ctx, hostPath, uvmPathForShare, readOnly, restrictAccess, allowedNames) if err != nil { - return errors.Wrapf(err, "adding plan9 mount %+v", mount) + return fmt.Errorf("adding plan9 mount %+v: %w", mount, err) } r.Add(share) } diff --git a/internal/hcsoci/resources_wcow.go b/internal/hcsoci/resources_wcow.go index 604ba8b550..90103f99e6 100644 --- a/internal/hcsoci/resources_wcow.go +++ b/internal/hcsoci/resources_wcow.go @@ -13,7 +13,6 @@ import ( "strings" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "github.com/Microsoft/hcsshim/internal/cmd" "github.com/Microsoft/hcsshim/internal/credentials" @@ -38,7 +37,7 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage") mountedLayers, closer, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.HostingSystem, coi.WCOWLayers) if err != nil { - return errors.Wrap(err, "failed to mount container storage") + return fmt.Errorf("failed to mount container storage: %w", err) } coi.Spec.Root.Path = mountedLayers.RootFS coi.mountedWCOWLayers = mountedLayers @@ -75,7 +74,7 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r // an HvSocket service was not possible. hvSockConfig := ccgInstance.HvSocketConfig if err := coi.HostingSystem.UpdateHvSocketService(ctx, hvSockConfig.ServiceId, hvSockConfig.ServiceConfig); err != nil { - return errors.Wrap(err, "failed to update hvsocket service") + return fmt.Errorf("failed to update hvsocket service: %w", err) } } } @@ -202,13 +201,13 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R } exitCode, err := cmd.ExecInUvm(ctx, coi.HostingSystem, req) if err != nil { - return errors.Wrapf(err, "failed to create sandbox mount directory in utility VM with exit code %d %q", exitCode, b.String()) + return fmt.Errorf("failed to create sandbox mount directory in utility VM with exit code %d %q: %w", exitCode, b.String(), err) } } else { if uvm.IsPipe(mount.Source) { pipe, err := coi.HostingSystem.AddPipe(ctx, mount.Source) if err != nil { - return errors.Wrap(err, "failed to add named pipe to UVM") + return fmt.Errorf("failed to add named pipe to UVM: %w", err) } r.Add(pipe) } else { @@ -216,7 +215,7 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R options := coi.HostingSystem.DefaultVSMBOptions(readOnly) share, err := coi.HostingSystem.AddVSMB(ctx, mount.Source, options) if err != nil { - return errors.Wrapf(err, "failed to add VSMB share to utility VM for mount %+v", mount) + return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %w", mount, err) } r.Add(share) } diff --git a/internal/jobcontainers/env.go b/internal/jobcontainers/env.go index 25f1bab64d..9193c95014 100644 --- a/internal/jobcontainers/env.go +++ b/internal/jobcontainers/env.go @@ -3,10 +3,10 @@ package jobcontainers import ( + "errors" "unicode/utf16" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go index 
63cd709564..203932d3be 100644 --- a/internal/jobcontainers/jobcontainer.go +++ b/internal/jobcontainers/jobcontainer.go @@ -4,6 +4,7 @@ package jobcontainers import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -27,7 +28,7 @@ import ( "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/winapi" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -277,7 +278,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ env, err := defaultEnvBlock(c.token) if err != nil { - return nil, errors.Wrap(err, "failed to get default environment block") + return nil, fmt.Errorf("failed to get default environment block: %w", err) } // Convert environment map to a slice of environment variables in the form [Key1=val1, key2=val2] @@ -336,7 +337,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // "foo bar.exe" exists, then return: "\"foo bar\" baz" absPath, commandLine, err := getApplicationName(commandLine, workDir, strings.Trim(path, "PATH=")) if err != nil { - return nil, errors.Wrapf(err, "failed to get application name from commandline %q", conf.CommandLine) + return nil, fmt.Errorf("failed to get application name from commandline %q: %w", conf.CommandLine, err) } // exec.Cmd internally does its own path resolution and as part of this checks some well known file extensions on the file given (e.g. if @@ -346,7 +347,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ // This is mostly to handle a common Kubernetes test image named agnhost that has the main entrypoint as a binary named agnhost with no extension. // https://github.com/kubernetes/kubernetes/blob/d64e91878517b1208a0bce7e2b7944645ace8ede/test/images/agnhost/Dockerfile_windows if err := os.Setenv("PATHEXT", ".COM;.EXE;.BAT;.CMD; "); err != nil { - return nil, errors.Wrap(err, "failed to set PATHEXT") + return nil, fmt.Errorf("failed to set PATHEXT: %w", err) } var cpty *conpty.Pty @@ -406,7 +407,7 @@ func (c *JobContainer) CreateProcess(ctx context.Context, config interface{}) (_ }() if err = process.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start host process") + return nil, fmt.Errorf("failed to start host process: %w", err) } // Assign the first process made as the init process of the container. 
@@ -486,7 +487,7 @@ func (c *JobContainer) Shutdown(ctx context.Context) error { func (c *JobContainer) shutdown(ctx context.Context) error { pids, err := c.job.Pids() if err != nil { - return errors.Wrap(err, "failed to get pids in container") + return fmt.Errorf("failed to get pids in container: %w", err) } if len(pids) == 0 { @@ -524,17 +525,17 @@ func (c *JobContainer) PropertiesV2(ctx context.Context, types ...hcsschema.Prop memInfo, err := c.job.QueryMemoryStats() if err != nil { - return nil, errors.Wrap(err, "failed to query for job containers memory information") + return nil, fmt.Errorf("failed to query for job containers memory information: %w", err) } processorInfo, err := c.job.QueryProcessorStats() if err != nil { - return nil, errors.Wrap(err, "failed to query for job containers processor information") + return nil, fmt.Errorf("failed to query for job containers processor information: %w", err) } storageInfo, err := c.job.QueryStorageStats() if err != nil { - return nil, errors.Wrap(err, "failed to query for job containers storage information") + return nil, fmt.Errorf("failed to query for job containers storage information: %w", err) } privateWorkingSet, err := c.job.QueryPrivateWorkingSet() @@ -593,7 +594,7 @@ func (c *JobContainer) Properties(ctx context.Context, types ...schema1.Property processList = append(processList, proc) }) if err != nil { - return nil, errors.Wrap(err, "failed to get process ") + return nil, fmt.Errorf("failed to get process : %w", err) } return &schema1.ContainerProperties{ProcessList: processList}, nil @@ -604,7 +605,7 @@ func (c *JobContainer) Terminate(ctx context.Context) error { log.G(ctx).WithField("id", c.id).Debug("terminating job container") if err := c.job.Terminate(1); err != nil { - return errors.Wrap(err, "failed to terminate job container") + return fmt.Errorf("failed to terminate job container: %w", err) } return nil } diff --git a/internal/jobcontainers/logon.go b/internal/jobcontainers/logon.go index c1e7d6b0e4..6623156b58 100644 --- a/internal/jobcontainers/logon.go +++ b/internal/jobcontainers/logon.go @@ -4,6 +4,7 @@ package jobcontainers import ( "context" + "errors" "fmt" "strings" "unsafe" @@ -11,7 +12,7 @@ import ( "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -162,7 +163,7 @@ func (c *JobContainer) processToken(ctx context.Context, userOrGroup string) (wi func openCurrentProcessToken() (windows.Token, error) { var token windows.Token if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_ALL_ACCESS, &token); err != nil { - return 0, errors.Wrap(err, "failed to open current process token") + return 0, fmt.Errorf("failed to open current process token: %w", err) } return token, nil } diff --git a/internal/jobcontainers/mounts.go b/internal/jobcontainers/mounts.go index 1b32e47e4b..110ed2088d 100644 --- a/internal/jobcontainers/mounts.go +++ b/internal/jobcontainers/mounts.go @@ -4,6 +4,7 @@ package jobcontainers import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -13,7 +14,7 @@ import ( "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -52,11 +53,11 @@ func fallbackMountSetup(spec *specs.Spec, sandboxVolumePath string) error { // Make sure all of the dirs leading up to the full path exist. 
strippedCtrPath := filepath.Dir(fullCtrPath) if err := os.MkdirAll(strippedCtrPath, 0777); err != nil { - return errors.Wrap(err, "failed to make directory for job container mount") + return fmt.Errorf("failed to make directory for job container mount: %w", err) } if err := os.Symlink(mount.Source, fullCtrPath); err != nil { - return errors.Wrap(err, "failed to setup mount for job container") + return fmt.Errorf("failed to setup mount for job container: %w", err) } } return nil diff --git a/internal/jobcontainers/path.go b/internal/jobcontainers/path.go index 4cef14e003..86cd9d465a 100644 --- a/internal/jobcontainers/path.go +++ b/internal/jobcontainers/path.go @@ -3,12 +3,13 @@ package jobcontainers import ( + "errors" "fmt" "os" "strings" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -224,13 +225,13 @@ func getSystemPaths() (string, error) { var searchPath string systemDir, err := windows.GetSystemDirectory() if err != nil { - return "", errors.Wrap(err, "failed to get system directory") + return "", fmt.Errorf("failed to get system directory: %w", err) } searchPath += systemDir + ";" windowsDir, err := windows.GetWindowsDirectory() if err != nil { - return "", errors.Wrap(err, "failed to get Windows directory") + return "", fmt.Errorf("failed to get Windows directory: %w", err) } searchPath += windowsDir + "\\System;" + windowsDir + ";" diff --git a/internal/jobcontainers/process.go b/internal/jobcontainers/process.go index cf3318f3b7..cde4ed9af5 100644 --- a/internal/jobcontainers/process.go +++ b/internal/jobcontainers/process.go @@ -4,12 +4,12 @@ package jobcontainers import ( "context" + "errors" "fmt" "io" "os" "sync" - "github.com/pkg/errors" "golang.org/x/sys/windows" "github.com/Microsoft/hcsshim/internal/conpty" @@ -100,7 +100,7 @@ func (p *JobProcess) Signal(ctx context.Context, options interface{}) (bool, err // The process we are signaling has stopped. Return a proper error that signals this condition. 
return false, fmt.Errorf("failed to send signal: %w", hcs.ErrProcessAlreadyStopped) } - return false, errors.Wrap(err, "failed to send signal") + return false, fmt.Errorf("failed to send signal: %w", err) } return true, nil } @@ -111,7 +111,7 @@ func (p *JobProcess) CloseStdin(ctx context.Context) error { defer p.stdioLock.Unlock() if p.stdin != nil { if err := p.stdin.Close(); err != nil { - return errors.Wrap(err, "failed to close job container stdin") + return fmt.Errorf("failed to close job container stdin: %w", err) } p.stdin = nil } @@ -124,7 +124,7 @@ func (p *JobProcess) CloseStdout(ctx context.Context) error { defer p.stdioLock.Unlock() if p.stdout != nil { if err := p.stdout.Close(); err != nil { - return errors.Wrap(err, "failed to close job container stdout") + return fmt.Errorf("failed to close job container stdout: %w", err) } p.stdout = nil } @@ -137,7 +137,7 @@ func (p *JobProcess) CloseStderr(ctx context.Context) error { defer p.stdioLock.Unlock() if p.stderr != nil { if err := p.stderr.Close(); err != nil { - return errors.Wrap(err, "failed to close job container stderr") + return fmt.Errorf("failed to close job container stderr: %w", err) } p.stderr = nil } @@ -251,7 +251,7 @@ func (p *JobProcess) exited() bool { func signalProcess(pid uint32, signal int) error { hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) if err != nil { - return errors.Wrap(err, "failed to open process") + return fmt.Errorf("failed to open process: %w", err) } defer func() { _ = windows.Close(hProc) @@ -267,7 +267,7 @@ func signalProcess(pid uint32, signal int) error { // Note: This is a hack which is not officially supported. k32, err := windows.LoadLibrary("kernel32.dll") if err != nil { - return errors.Wrap(err, "failed to load kernel32 library") + return fmt.Errorf("failed to load kernel32 library: %w", err) } defer func() { _ = windows.FreeLibrary(k32) @@ -275,12 +275,12 @@ func signalProcess(pid uint32, signal int) error { proc, err := windows.GetProcAddress(k32, "CtrlRoutine") if err != nil { - return errors.Wrap(err, "failed to load CtrlRoutine") + return fmt.Errorf("failed to load CtrlRoutine: %w", err) } threadHandle, err := winapi.CreateRemoteThread(hProc, nil, 0, proc, uintptr(signal), 0, nil) if err != nil { - return errors.Wrapf(err, "failed to open remote thread in target process %d", pid) + return fmt.Errorf("failed to open remote thread in target process %d: %w", pid, err) } defer func() { _ = windows.Close(threadHandle) diff --git a/internal/layers/lcow.go b/internal/layers/lcow.go index 2a6da2a622..076af5e4ff 100644 --- a/internal/layers/lcow.go +++ b/internal/layers/lcow.go @@ -6,12 +6,13 @@ package layers import ( "context" "encoding/json" + "errors" "fmt" "path/filepath" "strings" "github.com/containerd/containerd/api/types" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/guestpath" diff --git a/internal/layers/wcow_mount.go b/internal/layers/wcow_mount.go index a5f706f940..1d63723987 100644 --- a/internal/layers/wcow_mount.go +++ b/internal/layers/wcow_mount.go @@ -5,12 +5,12 @@ package layers import ( "context" + "errors" "fmt" "os" "path/filepath" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" @@ -135,7 +135,7 @@ func mountProcessIsolatedWCIFSLayers(ctx context.Context, l *wcowWCIFSLayers) (_ // If we got unlucky and ran into one of the two errors mentioned five times in a row and left the loop, we need to check // the loop error here and fail also. 
if lErr != nil { - return nil, nil, errors.Wrap(lErr, "layer retry loop failed") + return nil, nil, fmt.Errorf("layer retry loop failed: %w", lErr) } // If any of the below fails, we want to detach the filter and unmount the disk. @@ -409,7 +409,7 @@ func MountSandboxVolume(ctx context.Context, hostPath, volumeName string) (err e } if err = windows.SetVolumeMountPoint(windows.StringToUTF16Ptr(hostPath), windows.StringToUTF16Ptr(volumeName)); err != nil { - return errors.Wrapf(err, "failed to mount sandbox volume to %s on host", hostPath) + return fmt.Errorf("failed to mount sandbox volume to %s on host: %w", hostPath, err) } return nil } @@ -421,10 +421,10 @@ func RemoveSandboxMountPoint(ctx context.Context, hostPath string) error { }).Debug("removing volume mount point for container") if err := windows.DeleteVolumeMountPoint(windows.StringToUTF16Ptr(hostPath)); err != nil { - return errors.Wrap(err, "failed to delete sandbox volume mount point") + return fmt.Errorf("failed to delete sandbox volume mount point: %w", err) } if err := os.Remove(hostPath); err != nil { - return errors.Wrapf(err, "failed to remove sandbox mounted folder path %q", hostPath) + return fmt.Errorf("failed to remove sandbox mounted folder path %q: %w", hostPath, err) } return nil } diff --git a/internal/memory/pool.go b/internal/memory/pool.go index 6d39ca3bf9..a6550214a0 100644 --- a/internal/memory/pool.go +++ b/internal/memory/pool.go @@ -1,7 +1,7 @@ package memory import ( - "github.com/pkg/errors" + "errors" ) const ( @@ -69,8 +69,10 @@ type PoolAllocator struct { pools [memoryClassNumber]*memoryPool } -var _ MappedRegion = &region{} -var _ Allocator = &PoolAllocator{} +var ( + _ MappedRegion = &region{} + _ Allocator = &PoolAllocator{} ) func (r *region) Offset() uint64 { return r.offset diff --git a/internal/memory/types.go b/internal/memory/types.go index d6cdb8cc4c..8bb8166b1e 100644 --- a/internal/memory/types.go +++ b/internal/memory/types.go @@ -1,6 +1,8 @@ package memory -import "github.com/pkg/errors" +import ( + "errors" +) type classType uint32 diff --git a/internal/ncproxy/store/store.go b/internal/ncproxy/store/store.go index 678eec4409..2c3bc243f8 100644 --- a/internal/ncproxy/store/store.go +++ b/internal/ncproxy/store/store.go @@ -3,9 +3,11 @@ package store import ( "context" "encoding/json" + "errors" + "fmt" ncproxynetworking "github.com/Microsoft/hcsshim/internal/ncproxy/networking" - "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" ) @@ -33,14 +35,14 @@ func (n *NetworkingStore) GetNetworkByName(ctx context.Context, networkName stri if err := n.db.View(func(tx *bolt.Tx) error { bkt := getNetworkBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "network bucket %v", bucketKeyNetwork) + return fmt.Errorf("network bucket %v: %w", bucketKeyNetwork, ErrBucketNotFound) } data := bkt.Get([]byte(networkName)) if data == nil { - return errors.Wrapf(ErrKeyNotFound, "network %v", networkName) + return fmt.Errorf("network %v: %w", networkName, ErrKeyNotFound) } if err := json.Unmarshal(data, internalData); err != nil { - return errors.Wrapf(err, "data is %v", string(data)) + return fmt.Errorf("data is %v: %w", string(data), err) } return nil }); err != nil { @@ -70,7 +72,7 @@ func (n *NetworkingStore) DeleteNetwork(ctx context.Context, networkName string) if err := n.db.Update(func(tx *bolt.Tx) error { bkt := getNetworkBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "bucket %v", bucketKeyNetwork) + return fmt.Errorf("bucket %v: %w", bucketKeyNetwork, ErrBucketNotFound) }
return bkt.Delete([]byte(networkName)) }); err != nil { @@ -83,12 +85,12 @@ func (n *NetworkingStore) ListNetworks(ctx context.Context) (results []*ncproxyn if err := n.db.View(func(tx *bolt.Tx) error { bkt := getNetworkBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "network bucket %v", bucketKeyNetwork) + return fmt.Errorf("network bucket %v: %w", bucketKeyNetwork, ErrBucketNotFound) } err := bkt.ForEach(func(k, v []byte) error { internalData := &ncproxynetworking.Network{} if err := json.Unmarshal(v, internalData); err != nil { - return errors.Wrapf(err, "data is %v", string(v)) + return fmt.Errorf("data is %v: %w", string(v), err) } results = append(results, internalData) return nil @@ -106,11 +108,11 @@ func (n *NetworkingStore) GetEndpointByName(ctx context.Context, endpointName st if err := n.db.View(func(tx *bolt.Tx) error { bkt := getEndpointBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "endpoint bucket %v", bucketKeyEndpoint) + return fmt.Errorf("endpoint bucket %v: %w", bucketKeyEndpoint, ErrBucketNotFound) } jsonData := bkt.Get([]byte(endpointName)) if jsonData == nil { - return errors.Wrapf(ErrKeyNotFound, "endpoint %v", endpointName) + return fmt.Errorf("endpoint %v: %w", endpointName, ErrKeyNotFound) } if err := json.Unmarshal(jsonData, endpt); err != nil { return err @@ -152,7 +154,7 @@ func (n *NetworkingStore) DeleteEndpoint(ctx context.Context, endpointName strin if err := n.db.Update(func(tx *bolt.Tx) error { bkt := getEndpointBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "bucket %v", bucketKeyEndpoint) + return fmt.Errorf("bucket %v: %w", bucketKeyEndpoint, ErrBucketNotFound) } return bkt.Delete([]byte(endpointName)) }); err != nil { @@ -165,7 +167,7 @@ func (n *NetworkingStore) ListEndpoints(ctx context.Context) (results []*ncproxy if err := n.db.View(func(tx *bolt.Tx) error { bkt := getEndpointBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "endpoint bucket %v", bucketKeyEndpoint) + return fmt.Errorf("endpoint bucket %v: %w", bucketKeyEndpoint, ErrBucketNotFound) } err := bkt.ForEach(func(k, v []byte) error { endptInternal := &ncproxynetworking.Endpoint{} @@ -203,11 +205,11 @@ func (c *ComputeAgentStore) GetComputeAgent(ctx context.Context, containerID str if err := c.db.View(func(tx *bolt.Tx) error { bkt := getComputeAgentBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "bucket %v", bucketKeyComputeAgent) + return fmt.Errorf("bucket %v: %w", bucketKeyComputeAgent, ErrBucketNotFound) } data := bkt.Get([]byte(containerID)) if data == nil { - return errors.Wrapf(ErrKeyNotFound, "key %v", containerID) + return fmt.Errorf("key %v: %w", containerID, ErrKeyNotFound) } result = string(data) return nil @@ -226,7 +228,7 @@ func (c *ComputeAgentStore) GetComputeAgents(ctx context.Context) (map[string]st if err := c.db.View(func(tx *bolt.Tx) error { bkt := getComputeAgentBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "bucket %v", bucketKeyComputeAgent) + return fmt.Errorf("bucket %v: %w", bucketKeyComputeAgent, ErrBucketNotFound) } err := bkt.ForEach(func(k, v []byte) error { content[string(k)] = string(v) @@ -260,7 +262,7 @@ func (c *ComputeAgentStore) DeleteComputeAgent(ctx context.Context, containerID if err := c.db.Update(func(tx *bolt.Tx) error { bkt := getComputeAgentBucket(tx) if bkt == nil { - return errors.Wrapf(ErrBucketNotFound, "bucket %v", bucketKeyComputeAgent) + return fmt.Errorf("bucket %v: %w", bucketKeyComputeAgent, ErrBucketNotFound) } 
return bkt.Delete([]byte(containerID)) }); err != nil { diff --git a/internal/regopolicyinterpreter/regopolicyinterpreter.go b/internal/regopolicyinterpreter/regopolicyinterpreter.go index 030171c78d..a780eb7d62 100644 --- a/internal/regopolicyinterpreter/regopolicyinterpreter.go +++ b/internal/regopolicyinterpreter/regopolicyinterpreter.go @@ -5,6 +5,7 @@ import ( "context" _ "embed" "encoding/json" + "errors" "fmt" "log" "os" @@ -14,7 +15,6 @@ import ( "github.com/open-policy-agent/opa/rego" "github.com/open-policy-agent/opa/storage/inmem" "github.com/open-policy-agent/opa/topdown" - "github.com/pkg/errors" ) type LogLevel int @@ -312,7 +312,7 @@ func (r *RegoPolicyInterpreter) EnableLogging(path string, level LogLevel) error r.compiledModules = nil r.logLevel = level - file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666) + file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { return err } @@ -561,7 +561,6 @@ func (r *RegoPolicyInterpreter) RawQuery(rule string, input map[string]interface } resultSet, err := r.query(rule, input) - if err != nil { return nil, err } @@ -585,7 +584,6 @@ func (r *RegoPolicyInterpreter) Query(rule string, input map[string]interface{}) } rawResult, err := r.query(rule, input) - if err != nil { return nil, err } diff --git a/internal/tools/networkagent/defs.go b/internal/tools/networkagent/defs.go index a5e31d40ca..c6f7047382 100644 --- a/internal/tools/networkagent/defs.go +++ b/internal/tools/networkagent/defs.go @@ -4,10 +4,10 @@ package main import ( "encoding/json" + "errors" + "fmt" "os" - "github.com/pkg/errors" - ncproxygrpc "github.com/Microsoft/hcsshim/pkg/ncproxy/ncproxygrpc/v1" nodenetsvcV0 "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v0" nodenetsvc "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v1" @@ -58,7 +58,7 @@ type config struct { func readConfig(path string) (*config, error) { data, err := os.ReadFile(path) if err != nil { - return nil, errors.Wrap(err, "failed to read config file") + return nil, fmt.Errorf("failed to read config file: %w", err) } conf := &config{} if err := json.Unmarshal(data, conf); err != nil { diff --git a/internal/uvm/computeagent.go b/internal/uvm/computeagent.go index 7516cdf6ea..14341a097a 100644 --- a/internal/uvm/computeagent.go +++ b/internal/uvm/computeagent.go @@ -4,12 +4,14 @@ package uvm import ( "context" + "errors" + "fmt" "strings" "github.com/Microsoft/go-winio" "github.com/containerd/ttrpc" typeurl "github.com/containerd/typeurl/v2" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -126,7 +128,7 @@ func (ca *computeAgent) AddNIC(ctx context.Context, req *computeagent.AddNICInte case *hcn.HostComputeEndpoint: hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", endpt.Name) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", endpt.Name, err) } if err := ca.uvm.AddEndpointToNSWithID(ctx, hnsEndpoint.Namespace.ID, req.NicID, hnsEndpoint); err != nil { return nil, err @@ -160,7 +162,7 @@ func (ca *computeAgent) ModifyNIC(ctx context.Context, req *computeagent.ModifyN case *hcn.HostComputeEndpoint: hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", endpt.Name) + return nil, fmt.Errorf("failed to get endpoint with name `%s`: %w", endpt.Name, err) } moderationValue := 
hcsschema.InterruptModerationValue(req.IovPolicySettings.InterruptModeration) @@ -179,7 +181,7 @@ func (ca *computeAgent) ModifyNIC(ctx context.Context, req *computeagent.ModifyN } if err := ca.uvm.UpdateNIC(ctx, req.NicID, nic); err != nil { - return nil, errors.Wrap(err, "failed to update UVM's network adapter") + return nil, fmt.Errorf("failed to update UVM's network adapter: %w", err) } default: return nil, status.Error(codes.InvalidArgument, "invalid request endpoint type") @@ -216,7 +218,7 @@ func (ca *computeAgent) DeleteNIC(ctx context.Context, req *computeagent.DeleteN case *hcn.HostComputeEndpoint: hnsEndpoint, err := hnsGetHNSEndpointByName(endpt.Name) if err != nil { - return nil, errors.Wrapf(err, "failed to get endpoint with name %q", endpt.Name) + return nil, fmt.Errorf("failed to get endpoint with name %q: %w", endpt.Name, err) } if err := ca.uvm.RemoveEndpointFromNS(ctx, hnsEndpoint.Namespace.ID, hnsEndpoint); err != nil { return nil, err @@ -232,7 +234,7 @@ func setupAndServe(ctx context.Context, caAddr string, vm *UtilityVM) error { // Setup compute agent service l, err := winio.ListenPipe(caAddr, nil) if err != nil { - return errors.Wrapf(err, "failed to listen on %s", caAddr) + return fmt.Errorf("failed to listen on %s: %w", caAddr, err) } s, err := ttrpc.NewServer(ttrpc.WithUnaryServerInterceptor(octtrpc.ServerInterceptor())) if err != nil { diff --git a/internal/uvm/create_lcow.go b/internal/uvm/create_lcow.go index dcd830b85c..ff5172d17e 100644 --- a/internal/uvm/create_lcow.go +++ b/internal/uvm/create_lcow.go @@ -5,6 +5,7 @@ package uvm import ( "context" "encoding/base64" + "errors" "fmt" "maps" "net" @@ -15,7 +16,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/pkg/securitypolicy" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "go.opencensus.io/trace" @@ -757,12 +758,12 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs st, stErr := os.Stat(rootfsFullPath) if stErr != nil { - return nil, errors.Wrapf(stErr, "failed to stat rootfs: %q", rootfsFullPath) + return nil, fmt.Errorf("failed to stat rootfs: %q: %w", rootfsFullPath, stErr) } devSize := pageAlign(uint64(st.Size())) memReg, pErr := pmem.Allocate(devSize) if pErr != nil { - return nil, errors.Wrap(pErr, "failed to allocate memory for rootfs") + return nil, fmt.Errorf("failed to allocate memory for rootfs: %w", pErr) } defer func() { if err != nil { @@ -774,7 +775,7 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs dev := newVPMemMappedDevice(opts.RootFSFile, "/", devSize, memReg) if err := pmem.mapVHDLayer(ctx, dev); err != nil { - return nil, errors.Wrapf(err, "failed to save internal state for a multi-mapped rootfs device") + return nil, fmt.Errorf("failed to save internal state for a multi-mapped rootfs device: %w", err) } uvm.vpmemDevicesMultiMapped[0] = pmem } else { @@ -946,7 +947,7 @@ func CreateLCOW(ctx context.Context, opts *OptionsLCOW) (_ *UtilityVM, err error } if err = verifyOptions(ctx, opts); err != nil { - return nil, errors.Wrap(err, errBadUVMOpts.Error()) + return nil, fmt.Errorf(errBadUVMOpts.Error()+": %w", err) } // HCS config for SNP isolated vm is quite different to the usual case diff --git a/internal/uvm/create_wcow.go b/internal/uvm/create_wcow.go index 05b4a77aae..a9fdc5bd56 100644 --- a/internal/uvm/create_wcow.go +++ b/internal/uvm/create_wcow.go @@ -11,7 +11,7 @@ import ( "github.com/Microsoft/go-winio" 
"github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" + "go.opencensus.io/trace" "github.com/Microsoft/hcsshim/internal/gcs" @@ -305,7 +305,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error }() if err := verifyOptions(ctx, opts); err != nil { - return nil, errors.Wrap(err, errBadUVMOpts.Error()) + return nil, fmt.Errorf(errBadUVMOpts.Error()+": %w", err) } doc, err := prepareConfigDoc(ctx, uvm, opts) @@ -314,7 +314,7 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error } if err := wclayer.GrantVmAccess(ctx, uvm.id, opts.BootFiles.ScratchVHDPath); err != nil { - return nil, errors.Wrap(err, "failed to grant vm access to scratch") + return nil, fmt.Errorf("failed to grant vm access to scratch: %w", err) } doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} diff --git a/internal/uvm/network.go b/internal/uvm/network.go index 18090f1ec5..811b1f31fc 100644 --- a/internal/uvm/network.go +++ b/internal/uvm/network.go @@ -4,6 +4,7 @@ package uvm import ( "context" + "errors" "fmt" "os" "slices" @@ -12,7 +13,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/containerd/ttrpc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/hcn" @@ -133,7 +134,7 @@ func (n *ncproxyClient) Close() error { func (uvm *UtilityVM) GetNCProxyClient() (*ncproxyClient, error) { conn, err := winio.DialPipe(uvm.ncProxyClientAddress, nil) if err != nil { - return nil, errors.Wrap(err, "failed to connect to ncproxy service") + return nil, fmt.Errorf("failed to connect to ncproxy service: %w", err) } raw := ttrpc.NewClient(conn, ttrpc.WithOnClose(func() { conn.Close() })) return &ncproxyClient{raw, ncproxyttrpc.NewNetworkConfigProxyClient(raw)}, nil @@ -247,7 +248,7 @@ func NewExternalNetworkSetup(ctx context.Context, vm *UtilityVM, caAddr, contain func (e *externalNetworkSetup) ConfigureNetworking(ctx context.Context, namespaceID string, configType NetworkConfigType) error { client, err := e.vm.GetNCProxyClient() if err != nil { - return errors.Wrapf(err, "no ncproxy client for UVM %q", e.vm.ID()) + return fmt.Errorf("no ncproxy client for UVM %q: %w", e.vm.ID(), err) } defer client.Close() diff --git a/internal/uvm/share.go b/internal/uvm/share.go index d7b90a1b2f..8f6070f409 100644 --- a/internal/uvm/share.go +++ b/internal/uvm/share.go @@ -11,14 +11,13 @@ import ( hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/pkg/errors" ) func (uvm *UtilityVM) AddVsmbAndGetSharePath(ctx context.Context, reqHostPath, reqUVMPath string, readOnly bool) (*VSMBShare, string, error) { options := uvm.DefaultVSMBOptions(readOnly) vsmbShare, err := uvm.AddVSMB(ctx, reqHostPath, options) if err != nil { - return nil, "", errors.Wrapf(err, "failed to add mount as vSMB share to UVM") + return nil, "", fmt.Errorf("failed to add mount as vSMB share to UVM: %w", err) } defer func() { if err != nil { @@ -28,7 +27,7 @@ func (uvm *UtilityVM) AddVsmbAndGetSharePath(ctx context.Context, reqHostPath, r sharePath, err := uvm.GetVSMBUvmPath(ctx, reqHostPath, readOnly) if err != nil { - return nil, "", errors.Wrapf(err, "failed to get vsmb path") + return nil, "", fmt.Errorf("failed to get vsmb path: %w", err) } return vsmbShare, sharePath, nil diff --git a/internal/uvm/stats.go b/internal/uvm/stats.go index 2cd5c24ce0..f1b044838c 100644 
--- a/internal/uvm/stats.go +++ b/internal/uvm/stats.go @@ -4,11 +4,13 @@ package uvm import ( "context" + "errors" + "fmt" "strings" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/go-winio/pkg/process" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/windows" @@ -79,7 +81,7 @@ func lookupVMMEM(ctx context.Context, vmID guid.GUID) (proc windows.Handle, err pids, err := process.EnumProcesses() if err != nil { - return 0, errors.Wrap(err, "failed to enumerate processes") + return 0, fmt.Errorf("failed to enumerate processes: %w", err) } for _, pid := range pids { p, err := checkProcess(ctx, pid, "vmmem", "NT VIRTUAL MACHINE", vmIDStr) diff --git a/internal/uvm/vpmem.go b/internal/uvm/vpmem.go index c04f523d1c..58717ce531 100644 --- a/internal/uvm/vpmem.go +++ b/internal/uvm/vpmem.go @@ -4,10 +4,10 @@ package uvm import ( "context" + "errors" "fmt" "os" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" @@ -134,7 +134,7 @@ func (uvm *UtilityVM) addVPMemDefault(ctx context.Context, hostPath string) (_ s } if err := uvm.modify(ctx, modification); err != nil { - return "", errors.Errorf("uvm::addVPMemDefault: failed to modify utility VM configuration: %s", err) + return "", fmt.Errorf("uvm::addVPMemDefault: failed to modify utility VM configuration: %s", err) } uvm.vpmemDevicesDefault[deviceNumber] = newDefaultVPMemInfo(hostPath, uvmPath) @@ -168,7 +168,7 @@ func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) e }, } if err := uvm.modify(ctx, modification); err != nil { - return errors.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) + return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) } log.G(ctx).WithFields(logrus.Fields{ "hostPath": device.hostPath, diff --git a/internal/uvm/vpmem_mapped.go b/internal/uvm/vpmem_mapped.go index 3513873e8b..1234db9c0e 100644 --- a/internal/uvm/vpmem_mapped.go +++ b/internal/uvm/vpmem_mapped.go @@ -4,10 +4,10 @@ package uvm import ( "context" + "errors" "fmt" "os" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" @@ -112,7 +112,7 @@ func newMappedVPMemModifyRequest( } case guestrequest.RequestTypeRemove: if pmem == nil { - return nil, errors.Errorf("no device found at location %d", deviceNumber) + return nil, fmt.Errorf("no device found at location %d", deviceNumber) } request.ResourcePath = fmt.Sprintf(resourcepaths.VPMemDeviceResourceFormat, deviceNumber, md.mappedRegion.Offset()) default: @@ -255,7 +255,7 @@ func (uvm *UtilityVM) addVPMemMappedDevice(ctx context.Context, hostPath string) md := newVPMemMappedDevice(hostPath, uvmPath, devSize, memReg) modification, err := newMappedVPMemModifyRequest(ctx, guestrequest.RequestTypeAdd, deviceNumber, md, uvm) if err := uvm.modify(ctx, modification); err != nil { - return "", errors.Errorf("uvm::addVPMemMappedDevice: failed to modify utility VM configuration: %s", err) + return "", fmt.Errorf("uvm::addVPMemMappedDevice: failed to modify utility VM configuration: %s", err) } defer func() { if err != nil { @@ -268,7 +268,7 @@ func (uvm *UtilityVM) addVPMemMappedDevice(ctx context.Context, hostPath string) pmem := uvm.vpmemDevicesMultiMapped[deviceNumber] if err := pmem.mapVHDLayer(ctx, md); err != nil { - return "", errors.Wrapf(err, "failed to update internal state") + return "", fmt.Errorf("failed to update internal state: %w", err) } return uvmPath, 
nil } @@ -303,7 +303,7 @@ func (uvm *UtilityVM) removeVPMemMappedDevice(ctx context.Context, hostPath stri } if err := uvm.modify(ctx, modification); err != nil { - return errors.Errorf("failed to remove packed VPMem %s from UVM %s: %s", md.hostPath, uvm.id, err) + return fmt.Errorf("failed to remove packed VPMem %s from UVM %s: %s", md.hostPath, uvm.id, err) } pmem := uvm.vpmemDevicesMultiMapped[devNum] diff --git a/internal/verity/verity.go b/internal/verity/verity.go index 795a6427e8..251b6134ae 100644 --- a/internal/verity/verity.go +++ b/internal/verity/verity.go @@ -9,7 +9,7 @@ import ( "github.com/Microsoft/hcsshim/ext4/tar2ext4" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) @@ -36,7 +36,7 @@ func ReadVeritySuperBlock(ctx context.Context, layerPath string) (*guestresource dmvsb, err := dmverity.ReadDMVerityInfo(layerPath, ext4SizeInBytes) if err != nil { - return nil, errors.Wrap(err, "failed to read dm-verity super block") + return nil, fmt.Errorf("failed to read dm-verity super block: %w", err) } log.G(ctx).WithFields(logrus.Fields{ "layerPath": layerPath, diff --git a/internal/vm/hcs/boot.go b/internal/vm/hcs/boot.go index a1794a4f2c..2644fe96bc 100644 --- a/internal/vm/hcs/boot.go +++ b/internal/vm/hcs/boot.go @@ -3,9 +3,9 @@ package hcs import ( + "errors" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" ) func (uvmb *utilityVMBuilder) SetUEFIBoot(dir string, path string, args string) error { diff --git a/internal/vm/hcs/builder.go b/internal/vm/hcs/builder.go index b3f6026c9b..ea6a6792a6 100644 --- a/internal/vm/hcs/builder.go +++ b/internal/vm/hcs/builder.go @@ -4,12 +4,12 @@ package hcs import ( "context" + "fmt" "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/vm" - "github.com/pkg/errors" ) var _ vm.UVMBuilder = &utilityVMBuilder{} @@ -65,7 +65,7 @@ func NewUVMBuilder(id string, owner string, guestOS vm.GuestOS) (vm.UVMBuilder, func (uvmb *utilityVMBuilder) Create(ctx context.Context) (_ vm.UVM, err error) { cs, err := hcs.CreateComputeSystem(ctx, uvmb.id, uvmb.doc) if err != nil { - return nil, errors.Wrap(err, "failed to create hcs compute system") + return nil, fmt.Errorf("failed to create hcs compute system: %w", err) } defer func() { diff --git a/internal/vm/hcs/hcs.go b/internal/vm/hcs/hcs.go index ebf56a7cb6..e4ea80641c 100644 --- a/internal/vm/hcs/hcs.go +++ b/internal/vm/hcs/hcs.go @@ -4,13 +4,14 @@ package hcs import ( "context" + "fmt" "sync" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/vm" - "github.com/pkg/errors" + "golang.org/x/sys/windows" ) @@ -33,28 +34,28 @@ func (uvm *utilityVM) ID() string { func (uvm *utilityVM) Start(ctx context.Context) (err error) { if err := uvm.cs.Start(ctx); err != nil { - return errors.Wrap(err, "failed to start utility VM") + return fmt.Errorf("failed to start utility VM: %w", err) } return nil } func (uvm *utilityVM) Stop(ctx context.Context) error { if err := uvm.cs.Terminate(ctx); err != nil { - return errors.Wrap(err, "failed to terminate utility VM") + return fmt.Errorf("failed to terminate utility VM: %w", err) } return nil } func (uvm 
*utilityVM) Pause(ctx context.Context) error { if err := uvm.cs.Pause(ctx); err != nil { - return errors.Wrap(err, "failed to pause utility VM") + return fmt.Errorf("failed to pause utility VM: %w", err) } return nil } func (uvm *utilityVM) Resume(ctx context.Context) error { if err := uvm.cs.Resume(ctx); err != nil { - return errors.Wrap(err, "failed to resume utility VM") + return fmt.Errorf("failed to resume utility VM: %w", err) } return nil } @@ -64,7 +65,7 @@ func (uvm *utilityVM) Save(ctx context.Context) error { SaveType: "AsTemplate", } if err := uvm.cs.Save(ctx, saveOptions); err != nil { - return errors.Wrap(err, "failed to save utility VM state") + return fmt.Errorf("failed to save utility VM state: %w", err) } return nil } diff --git a/internal/vm/hcs/scsi.go b/internal/vm/hcs/scsi.go index ad2582d16e..3df37b5da2 100644 --- a/internal/vm/hcs/scsi.go +++ b/internal/vm/hcs/scsi.go @@ -4,11 +4,10 @@ package hcs import ( "context" + "errors" "fmt" "strconv" - "github.com/pkg/errors" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" diff --git a/internal/vm/hcs/serial.go b/internal/vm/hcs/serial.go index ef6541c5c1..4fdc5c04e4 100644 --- a/internal/vm/hcs/serial.go +++ b/internal/vm/hcs/serial.go @@ -3,11 +3,11 @@ package hcs import ( + "errors" "strconv" "strings" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/pkg/errors" ) func (uvmb *utilityVMBuilder) SetSerialConsole(port uint32, listenerPath string) error { diff --git a/internal/vm/hcs/stats.go b/internal/vm/hcs/stats.go index 8cd3edbc3e..ef731bffbd 100644 --- a/internal/vm/hcs/stats.go +++ b/internal/vm/hcs/stats.go @@ -4,6 +4,8 @@ package hcs import ( "context" + "errors" + "fmt" "strings" "github.com/Microsoft/go-winio/pkg/guid" @@ -12,7 +14,7 @@ import ( hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/vm" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -81,7 +83,7 @@ func lookupVMMEM(ctx context.Context, vmID guid.GUID) (proc windows.Handle, err pids, err := process.EnumProcesses() if err != nil { - return 0, errors.Wrap(err, "failed to enumerate processes") + return 0, fmt.Errorf("failed to enumerate processes: %w", err) } for _, pid := range pids { p, err := checkProcess(ctx, pid, "vmmem", "NT VIRTUAL MACHINE", vmIDStr) diff --git a/internal/vm/hcs/vmsocket.go b/internal/vm/hcs/vmsocket.go index 49c50b88b8..fa2f2b7b80 100644 --- a/internal/vm/hcs/vmsocket.go +++ b/internal/vm/hcs/vmsocket.go @@ -4,12 +4,12 @@ package hcs import ( "context" + "errors" "net" "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/vm" - "github.com/pkg/errors" ) func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocketType, connID interface{}) (net.Listener, error) { diff --git a/internal/vm/hcs/vpmem.go b/internal/vm/hcs/vpmem.go index 915a10e7f7..0bf73ba2da 100644 --- a/internal/vm/hcs/vpmem.go +++ b/internal/vm/hcs/vpmem.go @@ -4,11 +4,10 @@ package hcs import ( "context" + "errors" "fmt" "strconv" - "github.com/pkg/errors" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" diff --git a/internal/vm/remotevm/builder.go 
b/internal/vm/remotevm/builder.go index ed41d751a2..c94dcf9db8 100644 --- a/internal/vm/remotevm/builder.go +++ b/internal/vm/remotevm/builder.go @@ -4,12 +4,13 @@ package remotevm import ( "context" + "fmt" "io" "net" "os/exec" "github.com/containerd/ttrpc" - "github.com/pkg/errors" + "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/emptypb" @@ -42,25 +43,25 @@ func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS } job, err = jobobject.Create(ctx, opts) if err != nil { - return nil, errors.Wrap(err, "failed to create job object for remotevm process") + return nil, fmt.Errorf("failed to create job object for remotevm process: %w", err) } cmd := exec.Command(binPath, "--ttrpc", addr) p, err := cmd.StdoutPipe() if err != nil { - return nil, errors.Wrap(err, "failed to create stdout pipe") + return nil, fmt.Errorf("failed to create stdout pipe: %w", err) } if err := cmd.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start remotevm server process") + return nil, fmt.Errorf("failed to start remotevm server process: %w", err) } if err := job.Assign(uint32(cmd.Process.Pid)); err != nil { - return nil, errors.Wrap(err, "failed to assign remotevm process to job") + return nil, fmt.Errorf("failed to assign remotevm process to job: %w", err) } if err := job.SetTerminateOnLastHandleClose(); err != nil { - return nil, errors.Wrap(err, "failed to set terminate on last handle closed for remotevm job object") + return nil, fmt.Errorf("failed to set terminate on last handle closed for remotevm job object: %w", err) } // Wait for stdout to close. This is our signal that the server is successfully up and running. @@ -69,7 +70,7 @@ func NewUVMBuilder(ctx context.Context, id, owner, binPath, addr string, guestOS conn, err := net.Dial("unix", addr) if err != nil { - return nil, errors.Wrapf(err, "failed to dial remotevm address %q", addr) + return nil, fmt.Errorf("failed to dial remotevm address %q: %w", addr, err) } c := ttrpc.NewClient(conn, ttrpc.WithOnClose(func() { conn.Close() })) @@ -94,11 +95,11 @@ func (uvmb *utilityVMBuilder) Create(ctx context.Context) (vm.UVM, error) { // Grab what capabilities the virtstack supports up front. 
capabilities, err := uvmb.client.CapabilitiesVM(ctx, &emptypb.Empty{}) if err != nil { - return nil, errors.Wrap(err, "failed to get virtstack capabilities from vmservice") + return nil, fmt.Errorf("failed to get virtstack capabilities from vmservice: %w", err) } if _, err := uvmb.client.CreateVM(ctx, &vmservice.CreateVMRequest{Config: uvmb.config, LogID: uvmb.id}); err != nil { - return nil, errors.Wrap(err, "failed to create remote VM") + return nil, fmt.Errorf("failed to create remote VM: %w", err) } return &utilityVM{ diff --git a/internal/vm/remotevm/network.go b/internal/vm/remotevm/network.go index d44f41c6a4..17ed7bca4d 100644 --- a/internal/vm/remotevm/network.go +++ b/internal/vm/remotevm/network.go @@ -5,20 +5,20 @@ package remotevm import ( "context" "encoding/json" + "errors" "fmt" "strings" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/hcn" "github.com/Microsoft/hcsshim/internal/vmservice" - "github.com/pkg/errors" ) func getSwitchID(endpointID, portID string) (string, error) { // Get updated endpoint with new fields (need switch ID) ep, err := hcn.GetEndpointByID(endpointID) if err != nil { - return "", errors.Wrapf(err, "failed to get endpoint %q", endpointID) + return "", fmt.Errorf("failed to get endpoint %q: %w", endpointID, err) } type ExtraInfo struct { @@ -30,7 +30,7 @@ func getSwitchID(endpointID, portID string) (string, error) { var exi ExtraInfo if err := json.Unmarshal(ep.Health.Extra.Resources, &exi); err != nil { - return "", errors.Wrapf(err, "failed to unmarshal resource data from endpoint %q", endpointID) + return "", fmt.Errorf("failed to unmarshal resource data from endpoint %q: %w", endpointID, err) } if len(exi.Allocators) == 0 { @@ -53,7 +53,7 @@ func getSwitchID(endpointID, portID string) (string, error) { func (uvm *utilityVM) AddNIC(ctx context.Context, nicID, endpointID, macAddr string) error { portID, err := guid.NewV4() if err != nil { - return errors.Wrap(err, "failed to generate guid for port") + return fmt.Errorf("failed to generate guid for port: %w", err) } vmEndpointRequest := hcn.VmEndpointRequest{ @@ -64,7 +64,7 @@ func (uvm *utilityVM) AddNIC(ctx context.Context, nicID, endpointID, macAddr str m, err := json.Marshal(vmEndpointRequest) if err != nil { - return errors.Wrap(err, "failed to marshal endpoint request json") + return fmt.Errorf("failed to marshal endpoint request json: %w", err) } if err := hcn.ModifyEndpointSettings(endpointID, &hcn.ModifyEndpointSettingRequest{ @@ -72,7 +72,7 @@ func (uvm *utilityVM) AddNIC(ctx context.Context, nicID, endpointID, macAddr str RequestType: hcn.RequestTypeAdd, Settings: json.RawMessage(m), }); err != nil { - return errors.Wrap(err, "failed to configure switch port") + return fmt.Errorf("failed to configure switch port: %w", err) } switchID, err := getSwitchID(endpointID, portID.String()) @@ -95,7 +95,7 @@ func (uvm *utilityVM) AddNIC(ctx context.Context, nicID, endpointID, macAddr str }, }, ); err != nil { - return errors.Wrap(err, "failed to add network adapter") + return fmt.Errorf("failed to add network adapter: %w", err) } return nil @@ -115,7 +115,7 @@ func (uvm *utilityVM) RemoveNIC(ctx context.Context, nicID, endpointID, macAddr }, }, ); err != nil { - return errors.Wrap(err, "failed to remove network adapter") + return fmt.Errorf("failed to remove network adapter: %w", err) } return nil diff --git a/internal/vm/remotevm/remotevm.go b/internal/vm/remotevm/remotevm.go index f02327c6c4..92f039d365 100644 --- a/internal/vm/remotevm/remotevm.go +++ 
b/internal/vm/remotevm/remotevm.go @@ -4,8 +4,8 @@ package remotevm import ( "context" + "fmt" - "github.com/pkg/errors" "google.golang.org/protobuf/types/known/emptypb" "github.com/Microsoft/hcsshim/internal/jobobject" @@ -41,14 +41,14 @@ func (uvm *utilityVM) ID() string { func (uvm *utilityVM) Start(ctx context.Context) error { // The expectation is the VM should be in a paused state after creation. if _, err := uvm.client.ResumeVM(ctx, &emptypb.Empty{}); err != nil { - return errors.Wrap(err, "failed to start remote VM") + return fmt.Errorf("failed to start remote VM: %w", err) } return nil } func (uvm *utilityVM) Stop(ctx context.Context) error { if _, err := uvm.client.TeardownVM(ctx, &emptypb.Empty{}); err != nil { - return errors.Wrap(err, "failed to stop remote VM") + return fmt.Errorf("failed to stop remote VM: %w", err) } return nil } @@ -57,21 +57,21 @@ func (uvm *utilityVM) Wait() error { _, err := uvm.client.WaitVM(context.Background(), &emptypb.Empty{}) if err != nil { uvm.waitError = err - return errors.Wrap(err, "failed to wait on remote VM") + return fmt.Errorf("failed to wait on remote VM: %w", err) } return nil } func (uvm *utilityVM) Pause(ctx context.Context) error { if _, err := uvm.client.PauseVM(ctx, &emptypb.Empty{}); err != nil { - return errors.Wrap(err, "failed to pause remote VM") + return fmt.Errorf("failed to pause remote VM: %w", err) } return nil } func (uvm *utilityVM) Resume(ctx context.Context) error { if _, err := uvm.client.ResumeVM(ctx, &emptypb.Empty{}); err != nil { - return errors.Wrap(err, "failed to resume remote VM") + return fmt.Errorf("failed to resume remote VM: %w", err) } return nil } diff --git a/internal/vm/remotevm/scsi.go b/internal/vm/remotevm/scsi.go index fc7863669e..56e1a2b967 100644 --- a/internal/vm/remotevm/scsi.go +++ b/internal/vm/remotevm/scsi.go @@ -8,7 +8,6 @@ import ( "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" - "github.com/pkg/errors" ) func getSCSIDiskType(typ vm.SCSIDiskType) (vmservice.DiskType, error) { @@ -78,7 +77,7 @@ func (uvm *utilityVM) AddSCSIDisk(ctx context.Context, controller, lun uint32, p }, }, ); err != nil { - return errors.Wrap(err, "failed to add SCSI disk") + return fmt.Errorf("failed to add SCSI disk: %w", err) } return nil @@ -99,7 +98,7 @@ func (uvm *utilityVM) RemoveSCSIDisk(ctx context.Context, controller, lun uint32 }, }, ); err != nil { - return errors.Wrapf(err, "failed to remove SCSI disk %q", path) + return fmt.Errorf("failed to remove SCSI disk %q: %w", path, err) } return nil diff --git a/internal/vm/remotevm/storage.go b/internal/vm/remotevm/storage.go index 13a8358193..bcf64be54b 100644 --- a/internal/vm/remotevm/storage.go +++ b/internal/vm/remotevm/storage.go @@ -3,7 +3,7 @@ package remotevm import ( - "github.com/pkg/errors" + "fmt" ) func (uvmb *utilityVMBuilder) SetStorageQos(iopsMaximum int64, bandwidthMaximum int64) error { @@ -12,7 +12,7 @@ func (uvmb *utilityVMBuilder) SetStorageQos(iopsMaximum int64, bandwidthMaximum // in HCS we can do the same here as we launch the server process in a job object. 
if uvmb.job != nil { if err := uvmb.job.SetIOLimit(bandwidthMaximum, iopsMaximum); err != nil { - return errors.Wrap(err, "failed to set storage qos values on remotevm process") + return fmt.Errorf("failed to set storage qos values on remotevm process: %w", err) } } diff --git a/internal/vm/remotevm/vmsocket.go b/internal/vm/remotevm/vmsocket.go index 30be2769b9..281be2c19a 100644 --- a/internal/vm/remotevm/vmsocket.go +++ b/internal/vm/remotevm/vmsocket.go @@ -4,33 +4,34 @@ package remotevm import ( "context" + "errors" + "fmt" "net" "os" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/vm" "github.com/Microsoft/hcsshim/internal/vmservice" - "github.com/pkg/errors" ) func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocketType, connID interface{}) (_ net.Listener, err error) { // Make a temp file and delete to "reserve" a unique name for the unix socket f, err := os.CreateTemp("", "") if err != nil { - return nil, errors.Wrap(err, "failed to create temp file for unix socket") + return nil, fmt.Errorf("failed to create temp file for unix socket: %w", err) } if err := f.Close(); err != nil { - return nil, errors.Wrap(err, "failed to close temp file") + return nil, fmt.Errorf("failed to close temp file: %w", err) } if err := os.Remove(f.Name()); err != nil { - return nil, errors.Wrap(err, "failed to delete temp file to free up name") + return nil, fmt.Errorf("failed to delete temp file to free up name: %w", err) } l, err := net.Listen("unix", f.Name()) if err != nil { - return nil, errors.Wrapf(err, "failed to listen on unix socket %q", f.Name()) + return nil, fmt.Errorf("failed to listen on unix socket %q: %w", f.Name(), err) } defer func() { @@ -46,7 +47,7 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket return nil, errors.New("parameter passed to hvsocketlisten is not a GUID") } if err := uvm.hvSocketListen(ctx, serviceGUID.String(), f.Name()); err != nil { - return nil, errors.Wrap(err, "failed to setup relay to hvsocket listener") + return nil, fmt.Errorf("failed to setup relay to hvsocket listener: %w", err) } case vm.VSock: port, ok := connID.(uint32) @@ -54,7 +55,7 @@ func (uvm *utilityVM) VMSocketListen(ctx context.Context, listenType vm.VMSocket return nil, errors.New("parameter passed to vsocklisten is not the right type") } if err := uvm.vsockListen(ctx, port, f.Name()); err != nil { - return nil, errors.Wrap(err, "failed to setup relay to vsock listener") + return nil, fmt.Errorf("failed to setup relay to vsock listener: %w", err) } default: return nil, errors.New("unknown vmsocket type requested") diff --git a/internal/wclayer/cim/registry.go b/internal/wclayer/cim/registry.go index c95b03ca37..478121af74 100644 --- a/internal/wclayer/cim/registry.go +++ b/internal/wclayer/cim/registry.go @@ -7,7 +7,6 @@ import ( "github.com/Microsoft/hcsshim/internal/winapi" "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" ) // mergeHive merges the hive located at parentHivePath with the hive located at deltaHivePath and stores @@ -21,7 +20,7 @@ func mergeHive(parentHivePath, deltaHivePath, mergedHivePath string) (err error) defer func() { err2 := winapi.ORCloseHive(baseHive) if err == nil { - err = errors.Wrap(err2, "failed to close base hive") + err = fmt.Errorf("failed to close base hive: %w", err2) } }() if err := winapi.OROpenHive(deltaHivePath, &deltaHive); err != nil { @@ -30,7 +29,7 @@ func mergeHive(parentHivePath, deltaHivePath, mergedHivePath string) (err error) defer func() { 
err2 := winapi.ORCloseHive(deltaHive) if err == nil { - err = errors.Wrap(err2, "failed to close delta hive") + err = fmt.Errorf("failed to close delta hive: %w", err2) } }() if err := winapi.ORMergeHives([]winapi.ORHKey{baseHive, deltaHive}, &mergedHive); err != nil { @@ -39,7 +38,7 @@ func mergeHive(parentHivePath, deltaHivePath, mergedHivePath string) (err error) defer func() { err2 := winapi.ORCloseHive(mergedHive) if err == nil { - err = errors.Wrap(err2, "failed to close merged hive") + err = fmt.Errorf("failed to close merged hive: %w", err2) } }() if err := winapi.ORSaveHive(mergedHive, mergedHivePath, uint32(osversion.Get().MajorVersion), uint32(osversion.Get().MinorVersion)); err != nil { diff --git a/internal/wclayer/converttobaselayer.go b/internal/wclayer/converttobaselayer.go index d25c3c5206..a316267e62 100644 --- a/internal/wclayer/converttobaselayer.go +++ b/internal/wclayer/converttobaselayer.go @@ -13,7 +13,7 @@ import ( "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/safefile" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" + "go.opencensus.io/trace" "golang.org/x/sys/windows" ) @@ -85,7 +85,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { if !stat.Mode().IsDir() { fullPath := filepath.Join(root.Name(), UtilityVMFilesPath) - return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + return false, fmt.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) } const bcdRelativePath = "EFI\\Microsoft\\Boot\\BCD" @@ -97,12 +97,12 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { stat, err = safefile.LstatRelative(bcdPath, root) if err != nil { - return false, errors.Wrapf(err, "UtilityVM must contain '%s'", bcdRelativePath) + return false, fmt.Errorf("UtilityVM must contain %q: %w", bcdRelativePath, err) } if !stat.Mode().IsRegular() { fullPath := filepath.Join(root.Name(), bcdPath) - return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + return false, fmt.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) } return true, nil @@ -110,7 +110,6 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { func convertToBaseLayer(ctx context.Context, root *os.File) error { hasUtilityVM, err := ensureBaseLayer(root) - if err != nil { return err } diff --git a/internal/windevice/devicequery.go b/internal/windevice/devicequery.go index 313c853abb..daa916c9c5 100644 --- a/internal/windevice/devicequery.go +++ b/internal/windevice/devicequery.go @@ -3,13 +3,13 @@ package windevice import ( + "errors" "fmt" "strings" "unicode/utf16" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" ) const ( @@ -48,7 +48,7 @@ func GetDeviceLocationPathsFromIDs(ids []string) ([]string, error) { var devNodeInst uint32 err = winapi.CMLocateDevNode(&devNodeInst, id, _CM_LOCATE_DEVNODE_NORMAL) if err != nil { - return nil, errors.Wrapf(err, "failed to locate device node for %s", id) + return nil, fmt.Errorf("failed to locate device node for %s: %w", id, err) } propertyType := uint32(0) propertyBufferSize := uint32(0) @@ -56,14 +56,14 @@ func GetDeviceLocationPathsFromIDs(ids []string) ([]string, error) { // get the size of the property buffer by querying with a nil buffer and zeroed propertyBufferSize err = winapi.CMGetDevNodeProperty(devNodeInst, devPKeyDeviceLocationPaths, &propertyType, nil, &propertyBufferSize, 
0) if err != nil { - return nil, errors.Wrapf(err, "failed to get property buffer size of devnode query for %s with", id) + return nil, fmt.Errorf("failed to get property buffer size of devnode query for %s with: %w", id, err) } // get the property with the resulting propertyBufferSize propertyBuffer := make([]uint16, propertyBufferSize/2) err = winapi.CMGetDevNodeProperty(devNodeInst, devPKeyDeviceLocationPaths, &propertyType, &propertyBuffer[0], &propertyBufferSize, 0) if err != nil { - return nil, errors.Wrapf(err, "failed to get location path property from device node for %s with", id) + return nil, fmt.Errorf("failed to get location path property from device node for %s with: %w", id, err) } if propertyType != _DEVPROP_TYPE_STRING_LIST { return nil, fmt.Errorf("expected to return property type DEVPROP_TYPE_STRING_LIST %d, instead got %d", _DEVPROP_TYPE_STRING_LIST, propertyType) diff --git a/pkg/cimfs/mount_cim.go b/pkg/cimfs/mount_cim.go index ea7341b2f0..4fea491d89 100644 --- a/pkg/cimfs/mount_cim.go +++ b/pkg/cimfs/mount_cim.go @@ -10,7 +10,6 @@ import ( "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/pkg/errors" ) type MountError struct { @@ -46,7 +45,7 @@ func Unmount(volumePath string) error { } if !(strings.HasPrefix(volumePath, "\\\\?\\Volume{") && strings.HasSuffix(volumePath, "}\\")) { - return errors.Errorf("volume path %s is not in the expected format", volumePath) + return fmt.Errorf("volume path %s is not in the expected format", volumePath) } trimmedStr := strings.TrimPrefix(volumePath, "\\\\?\\Volume{") @@ -54,7 +53,7 @@ func Unmount(volumePath string) error { volGUID, err := guid.FromString(trimmedStr) if err != nil { - return errors.Wrapf(err, "guid parsing failed for %s", trimmedStr) + return fmt.Errorf("guid parsing failed for %s: %w", trimmedStr, err) } if err := winapi.CimDismountImage(&volGUID); err != nil { diff --git a/pkg/securitypolicy/regopolicy_test.go b/pkg/securitypolicy/regopolicy_test.go index fa6a9560ca..4424f3bc53 100644 --- a/pkg/securitypolicy/regopolicy_test.go +++ b/pkg/securitypolicy/regopolicy_test.go @@ -7,6 +7,7 @@ import ( "context" _ "embed" "encoding/json" + "errors" "fmt" "math/rand" "sort" @@ -21,7 +22,6 @@ import ( "github.com/blang/semver/v4" "github.com/open-policy-agent/opa/rego" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) const ( diff --git a/pkg/securitypolicy/securitypolicy.go b/pkg/securitypolicy/securitypolicy.go index 2385fe3665..d5c977ed33 100644 --- a/pkg/securitypolicy/securitypolicy.go +++ b/pkg/securitypolicy/securitypolicy.go @@ -5,6 +5,7 @@ import ( _ "embed" "encoding/base64" "encoding/json" + "errors" "fmt" "regexp" "strconv" @@ -13,7 +14,6 @@ import ( "github.com/Microsoft/hcsshim/internal/guestpath" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) //go:embed framework.rego @@ -143,7 +143,7 @@ func ExtractPolicyDecision(errorMessage string) (string, error) { re := regexp.MustCompile(fmt.Sprintf(policyDecisionPattern, `(.*)`)) matches := re.FindStringSubmatch(errorMessage) if len(matches) != 2 { - return "", errors.Errorf("unable to extract policy decision from error message: %s", errorMessage) + return "", fmt.Errorf("unable to extract policy decision from error message: %s", errorMessage) } errorBytes, err := base64.StdEncoding.DecodeString(matches[1]) diff --git a/pkg/securitypolicy/securitypolicyenforcer.go b/pkg/securitypolicy/securitypolicyenforcer.go index b9a8561eba..384aec10fe 100644 --- 
a/pkg/securitypolicy/securitypolicyenforcer.go +++ b/pkg/securitypolicy/securitypolicyenforcer.go @@ -7,6 +7,7 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "regexp" "strconv" @@ -18,7 +19,6 @@ import ( specInternal "github.com/Microsoft/hcsshim/internal/guest/spec" "github.com/Microsoft/hcsshim/internal/guestpath" - "github.com/pkg/errors" ) type createEnforcerFunc func(base64EncodedPolicy string, criMounts, criPrivilegedMounts []oci.Mount, maxErrorMessageLength int) (SecurityPolicyEnforcer, error) @@ -107,14 +107,14 @@ func newSecurityPolicyFromBase64JSON(base64EncodedPolicy string) (*SecurityPolic // we want to store a complex json object so.... base64 it is jsonPolicy, err := base64.StdEncoding.DecodeString(base64EncodedPolicy) if err != nil { - return nil, errors.Wrap(err, "unable to decode policy from Base64 format") + return nil, fmt.Errorf("unable to decode policy from Base64 format: %w", err) } // json unmarshall the decoded to a SecurityPolicy securityPolicy := new(SecurityPolicy) err = json.Unmarshal(jsonPolicy, securityPolicy) if err != nil { - return nil, errors.Wrap(err, "unable to unmarshal JSON policy") + return nil, fmt.Errorf("unable to unmarshal JSON policy: %w", err) } return securityPolicy, nil diff --git a/pkg/securitypolicy/securitypolicyenforcer_rego.go b/pkg/securitypolicy/securitypolicyenforcer_rego.go index d0b95081ca..5bb5a07155 100644 --- a/pkg/securitypolicy/securitypolicyenforcer_rego.go +++ b/pkg/securitypolicy/securitypolicyenforcer_rego.go @@ -8,6 +8,7 @@ import ( _ "embed" "encoding/base64" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -21,7 +22,6 @@ import ( rpi "github.com/Microsoft/hcsshim/internal/regopolicyinterpreter" "github.com/opencontainers/runc/libcontainer/user" oci "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) const regoEnforcerName = "rego" @@ -997,7 +997,7 @@ func getUser(passwdPath string, filter func(user.User) bool) (user.User, error) return user.User{}, err } if len(users) != 1 { - return user.User{}, errors.Errorf("expected exactly 1 user matched '%d'", len(users)) + return user.User{}, fmt.Errorf("expected exactly 1 user matched '%d'", len(users)) } return users[0], nil } @@ -1008,7 +1008,7 @@ func getGroup(groupPath string, filter func(user.Group) bool) (user.Group, error return user.Group{}, err } if len(groups) != 1 { - return user.Group{}, errors.Errorf("expected exactly 1 group matched '%d'", len(groups)) + return user.Group{}, fmt.Errorf("expected exactly 1 group matched '%d'", len(groups)) } return groups[0], nil } diff --git a/test/cri-containerd/helpers/log.go b/test/cri-containerd/helpers/log.go index 8c58021d3c..366444508b 100644 --- a/test/cri-containerd/helpers/log.go +++ b/test/cri-containerd/helpers/log.go @@ -11,7 +11,6 @@ import ( "sync" "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) func main() { @@ -31,24 +30,24 @@ func logContainerStdoutToFile() (err error) { waitPipe := os.Getenv("CONTAINER_WAIT") if sout, err = winio.DialPipeContext(ctx, soutPipe); err != nil { - return errors.Wrap(err, "couldn't open stdout pipe") + return fmt.Errorf("couldn't open stdout pipe: %w", err) } defer sout.Close() // The only expected argument should be output file path if len(os.Args[1:]) != 1 { - return errors.Errorf("Expected exactly 1 argument, got: %d", len(os.Args[1:])) + return fmt.Errorf("Expected exactly 1 argument, got: %d", len(os.Args[1:])) } var dest *os.File destPath := os.Args[1] if dest, err = os.Create(destPath); err != nil { - return 
errors.Wrap(err, "couldn't open destination file") + return fmt.Errorf("couldn't open destination file: %w", err) } defer dest.Close() if wait, err = winio.DialPipeContext(ctx, waitPipe); err != nil { - return errors.Wrap(err, "couldn't open wait pipe") + return fmt.Errorf("couldn't open wait pipe: %w", err) } // Indicate that logging binary is ready to receive output wait.Close() diff --git a/test/cri-containerd/test-images/unordered_tar/main.go b/test/cri-containerd/test-images/unordered_tar/main.go index aa63dacc1d..3857a6bc08 100644 --- a/test/cri-containerd/test-images/unordered_tar/main.go +++ b/test/cri-containerd/test-images/unordered_tar/main.go @@ -13,13 +13,12 @@ import ( digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) func createBlob(blobsDirPath string, blobContents io.Reader) (int64, digest.Digest, error) { tempFile, err := os.CreateTemp(blobsDirPath, "") if err != nil { - return 0, "", errors.Wrapf(err, "failed to create file") + return 0, "", fmt.Errorf("failed to create file: %w", err) } defer tempFile.Close() @@ -27,14 +26,14 @@ func createBlob(blobsDirPath string, blobContents io.Reader) (int64, digest.Dige multiWriter := io.MultiWriter(tempFile, hasher) written, err := io.Copy(multiWriter, blobContents) if err != nil { - return 0, "", errors.Wrap(err, "failed to copy content") + return 0, "", fmt.Errorf("failed to copy content: %w", err) } dgst := fmt.Sprintf("sha256:%x", hasher.Sum(nil)) tempFile.Close() // name the blob file with its digest (excluding the first `sha256:` part) if err = os.Rename(tempFile.Name(), filepath.Join(blobsDirPath, dgst[7:])); err != nil { - return 0, "", errors.Wrap(err, "renaming content file failed") + return 0, "", fmt.Errorf("renaming content file failed: %w", err) } return written, digest.Digest(dgst), nil } @@ -42,7 +41,7 @@ func createBlob(blobsDirPath string, blobContents io.Reader) (int64, digest.Dige func createBlobFromTar(tarPath, blobsDirPath string) (int64, digest.Digest, error) { srcFile, err := os.Open(tarPath) if err != nil { - return 0, "", errors.Wrap(err, "failed to open content source file") + return 0, "", fmt.Errorf("failed to open content source file: %w", err) } defer srcFile.Close() @@ -53,14 +52,14 @@ func createBlobFromTar(tarPath, blobsDirPath string) (int64, digest.Digest, erro func createBlobFromStruct(blobsDirPath string, data interface{}) (int64, digest.Digest, error) { dataJson, err := json.Marshal(data) if err != nil { - return 0, "", errors.Wrap(err, "failed to marshal struct") + return 0, "", fmt.Errorf("failed to marshal struct: %w", err) } // copy config buf := bytes.NewBuffer(dataJson) clen, dgst, err := createBlob(blobsDirPath, buf) if err != nil { - return 0, "", errors.Wrap(err, "config content write failed") + return 0, "", fmt.Errorf("config content write failed: %w", err) } return clen, dgst, nil } @@ -83,11 +82,11 @@ func createOciLayoutFile(dirPath string) error { // create oci layout file flayout, err := os.Create(filepath.Join(dirPath, "oci-layout")) if err != nil { - return errors.Wrap(err, "failed to create oci layout") + return fmt.Errorf("failed to create oci layout: %w", err) } _, err = flayout.Write([]byte(`{"imageLayoutVersion":"1.0.0"}`)) if err != nil { - return errors.Wrap(err, "failed to write layout file") + return fmt.Errorf("failed to write layout file: %w", err) } return nil } @@ -127,14 +126,14 @@ func createImageFromLayerTars(layerTars []string) error { // converted into the image tar at the 
end. tempDirPath, err := os.MkdirTemp("", "imagecreator-imagedir") if err != nil { - return errors.Wrap(err, "failed to create temporary directory") + return fmt.Errorf("failed to create temporary directory: %w", err) } defer os.RemoveAll(tempDirPath) sha256dirPath := filepath.Join(tempDirPath, "blobs", "sha256") err = os.MkdirAll(sha256dirPath, 0777) if err != nil { - return errors.Wrap(err, "failed to create blobs dir") + return fmt.Errorf("failed to create blobs dir: %w", err) } // copy all layer tar as blobs @@ -142,7 +141,7 @@ func createImageFromLayerTars(layerTars []string) error { for _, layerTar := range layerTars { llen, dgst, err := createBlobFromTar(layerTar, sha256dirPath) if err != nil { - return errors.Wrap(err, "layer content write failed") + return fmt.Errorf("layer content write failed: %w", err) } layerBlobs = append(layerBlobs, ocispec.Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", @@ -154,7 +153,7 @@ func createImageFromLayerTars(layerTars []string) error { // create image config blob clen, cdgst, err := createBlobFromStruct(sha256dirPath, createMinimalConfig(layerBlobs)) if err != nil { - return errors.Wrap(err, "failed to create config blob") + return fmt.Errorf("failed to create config blob: %w", err) } // create manifest blob @@ -166,7 +165,7 @@ func createImageFromLayerTars(layerTars []string) error { manifest.Layers = layerBlobs mlen, mdgst, err := createBlobFromStruct(sha256dirPath, manifest) if err != nil { - return errors.Wrap(err, "failed to crate blob for manifest") + return fmt.Errorf("failed to crate blob for manifest: %w", err) } // create index file @@ -180,16 +179,16 @@ func createImageFromLayerTars(layerTars []string) error { indexJson, err := json.Marshal(index) if err != nil { - return errors.Wrap(err, "failed to marshal index json") + return fmt.Errorf("failed to marshal index json: %w", err) } findex, err := os.Create(filepath.Join(tempDirPath, "index.json")) if err != nil { - return errors.Wrap(err, "failed to create index file") + return fmt.Errorf("failed to create index file: %w", err) } _, err = findex.Write(indexJson) if err != nil { - return errors.Wrap(err, "failed to write index.json") + return fmt.Errorf("failed to write index.json: %w", err) } // create oci layout file @@ -200,7 +199,7 @@ func createImageFromLayerTars(layerTars []string) error { // create tar from the image tarCmd := exec.Command("tar", "-C", tempDirPath, "-cf", "testimage.tar", ".") if err = tarCmd.Run(); err != nil { - return errors.Wrap(err, "image tar creation failed") + return fmt.Errorf("image tar creation failed: %w", err) } return nil } diff --git a/test/cri-containerd/test-images/unordered_tar/tar_generator.go b/test/cri-containerd/test-images/unordered_tar/tar_generator.go index d3f7bd0eb7..f4347bb2d1 100644 --- a/test/cri-containerd/test-images/unordered_tar/tar_generator.go +++ b/test/cri-containerd/test-images/unordered_tar/tar_generator.go @@ -5,8 +5,6 @@ import ( "fmt" "os" "path/filepath" - - "github.com/pkg/errors" ) type tarContents struct { @@ -32,11 +30,11 @@ func writeContentsToTar(tw *tar.Writer, contents []tarContents) error { } } if err := tw.WriteHeader(hdr); err != nil { - return errors.Wrapf(err, "failed to write tar header for file: %s", file.path) + return fmt.Errorf("failed to write tar header for file: %s: %w", file.path, err) } if !isDir { if _, err := tw.Write(file.body); err != nil { - return errors.Wrapf(err, "failed to write contents of file: %s", file.path) + return fmt.Errorf("failed to write contents of 
file: %s: %w", file.path, err) } } } @@ -75,14 +73,14 @@ func createUnorderedTars(dirPath string) ([]string, error) { layerPath := filepath.Join(dirPath, fmt.Sprintf("tar%d.tar", i+1)) layerTar, err := os.Create(layerPath) if err != nil { - return []string{}, errors.Wrapf(err, "failed to create tar at path: %s", layerPath) + return []string{}, fmt.Errorf("failed to create tar at path: %s: %w", layerPath, err) } defer layerTar.Close() tw := tar.NewWriter(layerTar) defer tw.Close() if err = writeContentsToTar(tw, layer); err != nil { - return []string{}, errors.Wrapf(err, "failed to write tar contents for tar : %s", layerPath) + return []string{}, fmt.Errorf("failed to write tar contents for tar : %s: %w", layerPath, err) } generatedTars = append(generatedTars, layerPath) diff --git a/test/go.mod b/test/go.mod index b514212f11..3797d8a38f 100644 --- a/test/go.mod +++ b/test/go.mod @@ -19,7 +19,6 @@ require ( github.com/opencontainers/image-spec v1.1.0 github.com/opencontainers/runtime-spec v1.2.0 github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 - github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/urfave/cli/v2 v2.27.1 go.opencensus.io v0.24.0 @@ -92,6 +91,7 @@ require ( github.com/opencontainers/runc v1.1.14 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.20.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect diff --git a/test/runhcs/e2e_matrix_test.go b/test/runhcs/e2e_matrix_test.go index f566441f54..1ee4705a3c 100644 --- a/test/runhcs/e2e_matrix_test.go +++ b/test/runhcs/e2e_matrix_test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "os" "os/exec" @@ -23,7 +24,7 @@ import ( "github.com/Microsoft/hcsshim/test/pkg/require" runc "github.com/containerd/go-runc" "github.com/opencontainers/runtime-tools/generate" - "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) @@ -157,11 +158,11 @@ func getWindowsImageNameByVersion(t *testing.T, bv int) string { func readPidFile(path string) (int, error) { data, err := os.ReadFile(path) if err != nil { - return -1, errors.Wrap(err, "failed to read pidfile") + return -1, fmt.Errorf("failed to read pidfile: %w", err) } p, err := strconv.Atoi(string(data)) if err != nil { - return -1, errors.Wrap(err, "pidfile failed to parse pid") + return -1, fmt.Errorf("pidfile failed to parse pid: %w", err) } return p, nil }