diff --git a/cmd/thanos/bucket.go b/cmd/thanos/bucket.go
index d4ed513cce..1a3ea49731 100644
--- a/cmd/thanos/bucket.go
+++ b/cmd/thanos/bucket.go
@@ -269,7 +269,7 @@ func registerBucketInspect(m map[string]setupFunc, root *kingpin.CmdClause, name
 		// Parse selector.
 		selectorLabels, err := parseFlagLabels(*selector)
 		if err != nil {
-			return errors.Errorf("error parsing selector flag: %v", err)
+			return errors.Wrap(err, "error parsing selector flag")
 		}
 
 		confContentYaml, err := objStoreConfig.Content()
diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go
index 972c4bade5..cdcf3c3ae1 100644
--- a/cmd/thanos/rule.go
+++ b/cmd/thanos/rule.go
@@ -740,7 +740,7 @@ func queryFunc(
 				return v, nil
 			}
 		}
-		return nil, errors.Errorf("no query API server reachable")
+		return nil, errors.New("no query API server reachable")
 	}
 }
 
diff --git a/pkg/block/block.go b/pkg/block/block.go
index df83f17649..51b8012974 100644
--- a/pkg/block/block.go
+++ b/pkg/block/block.go
@@ -87,7 +87,7 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st
 	}
 
 	if meta.Thanos.Labels == nil || len(meta.Thanos.Labels) == 0 {
-		return errors.Errorf("empty external labels are not allowed for Thanos block.")
+		return errors.New("empty external labels are not allowed for Thanos block.")
 	}
 
 	if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(DebugMetas, fmt.Sprintf("%s.json", id))); err != nil {
diff --git a/pkg/compact/downsample/streamed_block_writer.go b/pkg/compact/downsample/streamed_block_writer.go
index 267a93bfab..a987c6fa95 100644
--- a/pkg/compact/downsample/streamed_block_writer.go
+++ b/pkg/compact/downsample/streamed_block_writer.go
@@ -108,7 +108,7 @@ func NewStreamedBlockWriter(
 // labelsValues sets and memPostings to be written on the finalize state in the end of downsampling process.
 func (w *streamedBlockWriter) WriteSeries(lset labels.Labels, chunks []chunks.Meta) error {
 	if w.finalized || w.ignoreFinalize {
-		return errors.Errorf("series can't be added, writers has been closed or internal error happened")
+		return errors.New("series can't be added, writers has been closed or internal error happened")
 	}
 
 	if len(chunks) == 0 {
diff --git a/pkg/discovery/dns/miekgdns/lookup.go b/pkg/discovery/dns/miekgdns/lookup.go
index 4e3fb492e2..b9b95ce908 100644
--- a/pkg/discovery/dns/miekgdns/lookup.go
+++ b/pkg/discovery/dns/miekgdns/lookup.go
@@ -140,7 +140,7 @@ func askServerForName(name string, qType dns.Type, client *dns.Client, servAddr
 
 	if response.Truncated {
 		if client.Net == "tcp" {
-			return nil, errors.Errorf("got truncated message on TCP (64kiB limit exceeded?)")
+			return nil, errors.New("got truncated message on TCP (64kiB limit exceeded?)")
 		}
 
 		// TCP fallback.
diff --git a/pkg/discovery/dns/resolver_test.go b/pkg/discovery/dns/resolver_test.go
index f7d271d0c8..5663ca7380 100644
--- a/pkg/discovery/dns/resolver_test.go
+++ b/pkg/discovery/dns/resolver_test.go
@@ -76,7 +76,7 @@ var (
 			addr:           "test.mycompany.com",
 			qtype:          A,
 			expectedResult: nil,
-			expectedErr:    errors.Errorf("missing port in address given for dns lookup: test.mycompany.com"),
+			expectedErr:    errors.New("missing port in address given for dns lookup: test.mycompany.com"),
 			resolver:       &mockHostnameResolver{},
 		},
 		{
@@ -168,7 +168,7 @@ var (
 			addr:           "test.mycompany.com",
 			qtype:          "invalid",
 			expectedResult: nil,
-			expectedErr:    errors.Errorf("invalid lookup scheme \"invalid\""),
+			expectedErr:    errors.New("invalid lookup scheme \"invalid\""),
 			resolver:       &mockHostnameResolver{},
 		},
 	}
diff --git a/pkg/objstore/cos/cos.go b/pkg/objstore/cos/cos.go
index 65768d46b1..e336a78fc7 100644
--- a/pkg/objstore/cos/cos.go
+++ b/pkg/objstore/cos/cos.go
@@ -151,7 +151,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) err
 
 func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
 	if len(name) == 0 {
-		return nil, errors.Errorf("given object name should not empty")
+		return nil, errors.New("given object name should not empty")
 	}
 
 	opts := &cos.ObjectGetOptions{}
diff --git a/pkg/prober/http_test.go b/pkg/prober/http_test.go
index bf09f526f2..eb3a69f3f6 100644
--- a/pkg/prober/http_test.go
+++ b/pkg/prober/http_test.go
@@ -30,7 +30,7 @@ func TestHTTPProberReadinessInitialState(t *testing.T) {
 }
 
 func TestHTTPProberHealthyStatusSetting(t *testing.T) {
-	testError := errors.Errorf("test error")
+	testError := errors.New("test error")
 
 	p := NewHTTP()
 	p.Healthy()
@@ -43,7 +43,7 @@ func TestHTTPProberHealthyStatusSetting(t *testing.T) {
 }
 
 func TestHTTPProberReadyStatusSetting(t *testing.T) {
-	testError := errors.Errorf("test error")
+	testError := errors.New("test error")
 
 	p := NewHTTP()
 	p.Ready()
@@ -72,7 +72,7 @@ func TestHTTPProberMuxRegistering(t *testing.T) {
 
 	var g run.Group
 	g.Add(func() error {
-		return errors.Errorf("serve probes %w", http.Serve(l, mux))
+		return errors.Wrap(http.Serve(l, mux), "serve probes")
 	}, func(err error) {
 		t.Fatalf("server failed: %v", err)
 	})
diff --git a/pkg/query/api/v1.go b/pkg/query/api/v1.go
index 089f227e0f..2842055a5a 100644
--- a/pkg/query/api/v1.go
+++ b/pkg/query/api/v1.go
@@ -347,7 +347,7 @@ func (api *API) queryRange(r *http.Request) (interface{}, []error, *ApiError) {
 	// For safety, limit the number of returned points per timeseries.
 	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
 	if end.Sub(start)/step > 11000 {
-		err := errors.Errorf("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
+		err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
 		return nil, nil, &ApiError{errorBadData, err}
 	}
 
@@ -456,7 +456,7 @@ func (api *API) series(r *http.Request) (interface{}, []error, *ApiError) {
 	}
 
 	if len(r.Form["match[]"]) == 0 {
-		return nil, nil, &ApiError{errorBadData, errors.Errorf("no match[] parameter provided")}
+		return nil, nil, &ApiError{errorBadData, errors.New("no match[] parameter provided")}
 	}
 
 	var start time.Time
diff --git a/pkg/replicate/replicater.go b/pkg/replicate/replicater.go
index c4428e8696..73fb35e455 100644
--- a/pkg/replicate/replicater.go
+++ b/pkg/replicate/replicater.go
@@ -171,7 +171,7 @@ func RunReplicate(
 		level.Info(logger).Log("msg", "running replication attempt")
 
 		if err := newReplicationScheme(logger, metrics, blockFilter, fromBkt, toBkt, reg).execute(ctx); err != nil {
-			return errors.Errorf("replication execute: %w", err)
+			return errors.Wrap(err, "replication execute")
 		}
 
 		return nil
diff --git a/pkg/replicate/scheme.go b/pkg/replicate/scheme.go
index b237bab5e1..71d3ce7f33 100644
--- a/pkg/replicate/scheme.go
+++ b/pkg/replicate/scheme.go
@@ -192,7 +192,7 @@ func (rs *replicationScheme) execute(ctx context.Context) error {
 			return nil
 		}
 		if err != nil {
-			return errors.Errorf("load meta for block %v from origin bucket: %w", id.String(), err)
+			return errors.Wrapf(err, "load meta for block %v from origin bucket", id.String())
 		}
 
 		if len(meta.Thanos.Labels) == 0 {
@@ -207,7 +207,7 @@ func (rs *replicationScheme) execute(ctx context.Context) error {
 
 		return nil
 	}); err != nil {
-		return errors.Errorf("iterate over origin bucket: %w", err)
+		return errors.Wrap(err, "iterate over origin bucket")
 	}
 
 	candidateBlocks := []*metadata.Meta{}
@@ -227,7 +227,7 @@ func (rs *replicationScheme) execute(ctx context.Context) error {
 
 	for _, b := range candidateBlocks {
 		if err := rs.ensureBlockIsReplicated(ctx, b.BlockMeta.ULID); err != nil {
-			return errors.Errorf("ensure block %v is replicated: %w", b.BlockMeta.ULID.String(), err)
+			return errors.Wrapf(err, "ensure block %v is replicated", b.BlockMeta.ULID.String())
		}
	}

@@ -246,7 +246,7 @@ func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id uli
 
 	originMetaFile, err := rs.fromBkt.Get(ctx, metaFile)
 	if err != nil {
-		return errors.Errorf("get meta file from origin bucket: %w", err)
+		return errors.Wrap(err, "get meta file from origin bucket")
 	}
 
 	defer runutil.CloseWithLogOnErr(rs.logger, originMetaFile, "close original meta file")
@@ -258,18 +258,18 @@ func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id uli
 	}
 
 	if err != nil && !rs.toBkt.IsObjNotFoundErr(err) && err != io.EOF {
-		return errors.Errorf("get meta file from target bucket: %w", err)
+		return errors.Wrap(err, "get meta file from target bucket")
 	}
 
 	originMetaFileContent, err := ioutil.ReadAll(originMetaFile)
 	if err != nil {
-		return errors.Errorf("read origin meta file: %w", err)
+		return errors.Wrap(err, "read origin meta file")
 	}
 
 	if targetMetaFile != nil && !rs.toBkt.IsObjNotFoundErr(err) {
 		targetMetaFileContent, err := ioutil.ReadAll(targetMetaFile)
 		if err != nil {
-			return errors.Errorf("read target meta file: %w", err)
+			return errors.Wrap(err, "read target meta file")
 		}
 
 		if bytes.Equal(originMetaFileContent, targetMetaFileContent) {
@@ -286,7 +286,7 @@ func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id uli
 	if err := rs.fromBkt.Iter(ctx, chunksDir, func(objectName string) error {
 		err := rs.ensureObjectReplicated(ctx, objectName)
 		if err != nil {
-			return errors.Errorf("replicate object %v: %w", objectName, err)
+			return errors.Wrapf(err, "replicate object %v", objectName)
 		}
 
 		return nil
@@ -295,13 +295,13 @@ func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id uli
 	}
 
 	if err := rs.ensureObjectReplicated(ctx, indexFile); err != nil {
-		return errors.Errorf("replicate index file: %w", err)
+		return errors.Wrap(err, "replicate index file")
 	}
 
 	level.Debug(rs.logger).Log("msg", "replicating meta file", "object", metaFile)
 
 	if err := rs.toBkt.Upload(ctx, metaFile, bytes.NewReader(originMetaFileContent)); err != nil {
-		return errors.Errorf("upload meta file: %w", err)
+		return errors.Wrap(err, "upload meta file")
 	}
 
 	rs.metrics.blocksReplicated.Inc()
@@ -316,7 +316,7 @@ func (rs *replicationScheme) ensureObjectReplicated(ctx context.Context, objectN
 
 	exists, err := rs.toBkt.Exists(ctx, objectName)
 	if err != nil {
-		return errors.Errorf("check if %v exists in target bucket: %w", objectName, err)
+		return errors.Wrapf(err, "check if %v exists in target bucket", objectName)
 	}
 
 	// skip if already exists.
@@ -329,13 +329,13 @@ func (rs *replicationScheme) ensureObjectReplicated(ctx context.Context, objectN
 
 	r, err := rs.fromBkt.Get(ctx, objectName)
 	if err != nil {
-		return errors.Errorf("get %v from origin bucket: %w", objectName, err)
+		return errors.Wrapf(err, "get %v from origin bucket", objectName)
 	}
 
 	defer r.Close()
 
 	if err = rs.toBkt.Upload(ctx, objectName, r); err != nil {
-		return errors.Errorf("upload %v to target bucket: %w", objectName, err)
+		return errors.Wrapf(err, "upload %v to target bucket", objectName)
 	}
 
 	level.Info(rs.logger).Log("msg", "object replicated", "object", objectName)
@@ -352,22 +352,22 @@ func (rs *replicationScheme) ensureObjectReplicated(ctx context.Context, objectN
 func loadMeta(ctx context.Context, rs *replicationScheme, id ulid.ULID) (*metadata.Meta, bool, error) {
 	fetcher, err := thanosblock.NewMetaFetcher(rs.logger, 32, rs.fromBkt, "", rs.reg)
 	if err != nil {
-		return nil, false, errors.Errorf("create meta fetcher with buecket %v: %w", rs.fromBkt, err)
+		return nil, false, errors.Wrapf(err, "create meta fetcher with buecket %v", rs.fromBkt)
 	}
 
 	metas, _, err := fetcher.Fetch(ctx)
 	if err != nil {
 		switch errors.Cause(err) {
 		default:
-			return nil, false, errors.Errorf("fetch meta: %w", err)
+			return nil, false, errors.Wrap(err, "fetch meta")
 		case thanosblock.ErrorSyncMetaNotFound:
-			return nil, true, errors.Errorf("fetch meta: %w", err)
+			return nil, true, errors.Wrap(err, "fetch meta")
 		}
 	}
 
 	m, ok := metas[id]
 	if !ok {
-		return nil, true, errors.Errorf("fetch meta: %w", err)
+		return nil, true, errors.Wrap(err, "fetch meta")
 	}
 
 	return m, false, nil
diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go
index 7c17a2fa95..15f818d956 100644
--- a/pkg/store/bucket.go
+++ b/pkg/store/bucket.go
@@ -1854,7 +1854,7 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, offs []uint32, seq i
 
 		l, n := binary.Uvarint(cb)
 		if n < 1 {
-			return errors.Errorf("reading chunk length failed")
+			return errors.New("reading chunk length failed")
 		}
 		if len(cb) < n+int(l)+1 {
 			return errors.Errorf("preloaded chunk too small, expecting %d", n+int(l)+1)
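
Note on the pattern above: with the errors package Thanos uses (assumed here to be github.com/pkg/errors, which provides Wrap/Wrapf/Cause), errors.Errorf only formats a message; the %w verb does not wrap the underlying error the way fmt.Errorf does, so the cause chain is lost. The diff therefore switches constant messages to errors.New and error-with-context cases to errors.Wrap/Wrapf. The following standalone sketch is not part of the diff; readConfig and its file path are hypothetical, used only to illustrate the before/after choice.

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical helper for illustration only.
func readConfig(path string) ([]byte, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		// Before: errors.Errorf("read config %v: %w", path, err) — with pkg/errors
		// this does not wrap, so the original cause would be unrecoverable.
		// After: Wrapf attaches context and keeps the cause.
		return nil, errors.Wrapf(err, "read config %v", path)
	}
	if len(b) == 0 {
		// Constant message, no formatting or cause: errors.New instead of errors.Errorf.
		return nil, errors.New("config file is empty")
	}
	return b, nil
}

func main() {
	if _, err := readConfig("does-not-exist.yaml"); err != nil {
		// errors.Cause walks back through the Wrap chain to the original os error,
		// which is what code like loadMeta's switch on errors.Cause(err) relies on.
		fmt.Println(err)
		fmt.Println("cause:", errors.Cause(err))
	}
}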