diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 0d4633be0d46c..46b2f399734b1 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -268,7 +268,7 @@ The `ingester_config` block configures Ingesters.
 [chunk_idle_period: <duration> | default = 30m]
 
 # The targeted _uncompressed_ size in bytes of a chunk block
-# When this threshold is exceeded the head block will be cut and compressed inside the chunk
+# When this threshold is exceeded, the head block will be cut and compressed inside the chunk
 [chunk_block_size: <int> | default = 262144]
 
 # A target _compressed_ size in bytes for chunks.
@@ -277,6 +277,13 @@ The `ingester_config` block configures Ingesters.
 # The default value of 0 for this will create chunks with a fixed 10 blocks,
 # A non zero value will create chunks with a variable number of blocks to meet the target size.
 [chunk_target_size: <int> | default = 0]
+
+# The compression algorithm to use for chunks. (supported: gzip, lz4, snappy)
+# You should choose your algorithm depending on your needs:
+# - `gzip` has the highest compression ratio, but also the slowest decompression speed. (144 kB per chunk)
+# - `lz4` has the fastest compression speed. (188 kB per chunk)
+# - `snappy` is a fast and popular compression algorithm. (272 kB per chunk)
+[chunk_encoding: <string> | default = gzip]
 ```
 
 ### lifecycler_config
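For reference, the new `chunk_encoding` option documented above would be set like this — a minimal sketch, assuming the usual top-level `ingester` block of the Loki config; the non-default values are illustrative only:

```yaml
ingester:
  chunk_idle_period: 30m
  chunk_block_size: 262144
  # illustrative non-zero target: cut chunks at ~1.5MB compressed
  chunk_target_size: 1536000
  # one of: gzip (default), lz4, snappy
  chunk_encoding: snappy
```

diff --git a/go.mod b/go.mod
index a20367fe1e6df..3ec811d3ca5d6 100644
--- a/go.mod
+++ b/go.mod
@@ -16,8 +16,10 @@ require (
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
 	github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8
+	github.com/dustin/go-humanize v1.0.0
 	github.com/fatih/color v1.7.0
 	github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c
+	github.com/frankban/quicktest v1.7.2 // indirect
 	github.com/go-kit/kit v0.9.0
 	github.com/gocql/gocql v0.0.0-20181124151448-70385f88b28b // indirect
 	github.com/gogo/protobuf v1.3.0 // remember to update loki-build-image/Dockerfile too
@@ -31,14 +33,14 @@
 	github.com/influxdata/go-syslog/v2 v2.0.1
 	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
 	github.com/json-iterator/go v1.1.7
-	github.com/klauspost/compress v1.7.4
-	github.com/klauspost/cpuid v1.2.1 // indirect
+	github.com/klauspost/compress v1.9.4
 	github.com/mitchellh/mapstructure v1.1.2
 	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/opentracing/opentracing-go v1.1.0
+	github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible
 	github.com/pkg/errors v0.8.1
 	github.com/prometheus/client_golang v1.1.0
 	github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4
diff --git a/go.sum b/go.sum
index cb6a1b5c22241..b5caaf5fda951 100644
--- a/go.sum
+++ b/go.sum
@@ -159,6 +159,8 @@ github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+p
 github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ=
 github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
+github.com/frankban/quicktest 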
v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.3.0 h1:f2mbomatUsbw8NRY7rzqiiWNn4BRM+Jredz0Pt70Usg= @@ -394,10 +396,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.7.4 h1:4UqAIzZ1Ns2epCTyJ1d2xMWvxtX+FNSCYWeOFogK9nc= -github.com/klauspost/compress v1.7.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4= +github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -506,6 +506,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible h1:5isCJDRADbeSlWx6KVXAYwrcihyCGVXr7GNCdLEVDr8= +github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/pkg/chunkenc/facade.go b/pkg/chunkenc/facade.go index 8556b0fdd0442..d603920e3e653 100644 --- a/pkg/chunkenc/facade.go +++ b/pkg/chunkenc/facade.go @@ -7,13 +7,18 @@ import ( ) // GzipLogChunk is a cortex encoding type for our chunks. +// Deprecated: the chunk encoding/compression format is inside the chunk data. const GzipLogChunk = encoding.Encoding(128) +// LogChunk is a cortex encoding type for our chunks. +const LogChunk = encoding.Encoding(129) + func init() { encoding.MustRegisterEncoding(GzipLogChunk, "GzipLogChunk", func() encoding.Chunk { - return &Facade{ - c: NewMemChunk(EncGZIP), - } + return &Facade{} + }) + encoding.MustRegisterEncoding(LogChunk, "LogChunk", func() encoding.Chunk { + return &Facade{} }) } @@ -32,6 +37,9 @@ func NewFacade(c Chunk) encoding.Chunk { // Marshal implements encoding.Chunk. func (f Facade) Marshal(w io.Writer) error { + if f.c == nil { + return nil + } buf, err := f.c.Bytes() if err != nil { return err @@ -49,11 +57,14 @@ func (f *Facade) UnmarshalFromBuf(buf []byte) error { // Encoding implements encoding.Chunk. 
func (Facade) Encoding() encoding.Encoding { - return GzipLogChunk + return LogChunk } // Utilization implements encoding.Chunk. func (f Facade) Utilization() float64 { + if f.c == nil { + return 0 + } return f.c.Utilization() } @@ -66,7 +77,7 @@ func (f Facade) LokiChunk() Chunk { func UncompressedSize(c encoding.Chunk) (int, bool) { f, ok := c.(*Facade) - if !ok { + if !ok || f.c == nil { return 0, false } diff --git a/pkg/chunkenc/gzip_test.go b/pkg/chunkenc/gzip_test.go deleted file mode 100644 index 7cebc3d1373be..0000000000000 --- a/pkg/chunkenc/gzip_test.go +++ /dev/null @@ -1,396 +0,0 @@ -package chunkenc - -import ( - "bytes" - "fmt" - "math" - "math/rand" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/stretchr/testify/require" - - "github.com/grafana/loki/pkg/logproto" -) - -func TestGZIPBlock(t *testing.T) { - chk := NewMemChunk(EncGZIP) - - cases := []struct { - ts int64 - str string - cut bool - }{ - { - ts: 1, - str: "hello, world!", - }, - { - ts: 2, - str: "hello, world2!", - }, - { - ts: 3, - str: "hello, world3!", - }, - { - ts: 4, - str: "hello, world4!", - }, - { - ts: 5, - str: "hello, world5!", - }, - { - ts: 6, - str: "hello, world6!", - cut: true, - }, - { - ts: 7, - str: "hello, world7!", - }, - { - ts: 8, - str: "hello, worl\nd8!", - }, - { - ts: 8, - str: "hello, world 8, 2!", - }, - { - ts: 8, - str: "hello, world 8, 3!", - }, - { - ts: 9, - str: "", - }, - } - - for _, c := range cases { - require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str))) - if c.cut { - require.NoError(t, chk.cut()) - } - } - - it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) - require.NoError(t, err) - - idx := 0 - for it.Next() { - e := it.Entry() - require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) - require.Equal(t, cases[idx].str, e.Line) - idx++ - } - - require.NoError(t, it.Error()) - require.Equal(t, len(cases), idx) - - t.Run("bounded-iteration", func(t *testing.T) { - it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil) - require.NoError(t, err) - - idx := 2 - for it.Next() { - e := it.Entry() - require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) - require.Equal(t, cases[idx].str, e.Line) - idx++ - } - require.NoError(t, it.Error()) - require.Equal(t, 6, idx) - }) -} - -func TestGZIPSerialisation(t *testing.T) { - chk := NewMemChunk(EncGZIP) - - numSamples := 500000 - - for i := 0; i < numSamples; i++ { - require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i)))) - } - - byt, err := chk.Bytes() - require.NoError(t, err) - - bc, err := NewByteChunk(byt) - require.NoError(t, err) - - it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) - require.NoError(t, err) - for i := 0; i < numSamples; i++ { - require.True(t, it.Next()) - - e := it.Entry() - require.Equal(t, int64(i), e.Timestamp.UnixNano()) - require.Equal(t, string(i), e.Line) - } - - require.NoError(t, it.Error()) - - byt2, err := chk.Bytes() - require.NoError(t, err) - - require.True(t, bytes.Equal(byt, byt2)) -} - -func TestGZIPChunkFilling(t *testing.T) { - chk := NewMemChunk(EncGZIP) - chk.blockSize = 1024 - - // We should be able to append only 10KB of logs. 
- maxBytes := chk.blockSize * blocksPerChunk - lineSize := 512 - lines := maxBytes / lineSize - - logLine := string(make([]byte, lineSize)) - entry := &logproto.Entry{ - Timestamp: time.Unix(0, 0), - Line: logLine, - } - - i := int64(0) - for ; chk.SpaceFor(entry) && i < 30; i++ { - entry.Timestamp = time.Unix(0, i) - require.NoError(t, chk.Append(entry)) - } - - require.Equal(t, int64(lines), i) - - it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil) - require.NoError(t, err) - i = 0 - for it.Next() { - entry := it.Entry() - require.Equal(t, i, entry.Timestamp.UnixNano()) - i++ - } - - require.Equal(t, int64(lines), i) -} - -func TestGZIPChunkTargetSize(t *testing.T) { - targetSize := 1024 * 1024 - chk := NewMemChunkSize(EncGZIP, 1024, targetSize) - - lineSize := 512 - entry := &logproto.Entry{ - Timestamp: time.Unix(0, 0), - Line: "", - } - - // Use a random number to generate random log data, otherwise the gzip compression is way too good - // and the following loop has to run waaayyyyy to many times - // Using the same seed should guarantee the same random numbers and same test data. - r := rand.New(rand.NewSource(99)) - - i := int64(0) - - for ; chk.SpaceFor(entry) && i < 5000; i++ { - logLine := make([]byte, lineSize) - for j := range logLine { - logLine[j] = byte(r.Int()) - } - entry = &logproto.Entry{ - Timestamp: time.Unix(0, 0), - Line: string(logLine), - } - entry.Timestamp = time.Unix(0, i) - require.NoError(t, chk.Append(entry)) - } - - // 5000 is a limit ot make sure the test doesn't run away, we shouldn't need this many log lines to make 1MB chunk - require.NotEqual(t, 5000, i) - - require.NoError(t, chk.Close()) - - require.Equal(t, 0, chk.head.size) - - // Even though the seed is static above and results should be deterministic, - // we will allow +/- 10% variance - minSize := int(float64(targetSize) * 0.9) - maxSize := int(float64(targetSize) * 1.1) - require.Greater(t, chk.CompressedSize(), minSize) - require.Less(t, chk.CompressedSize(), maxSize) - - // Also verify our utilization is close to 1.0 - ut := chk.Utilization() - require.Greater(t, ut, 0.99) - require.Less(t, ut, 1.01) - -} - -func TestMemChunk_AppendOutOfOrder(t *testing.T) { - t.Parallel() - - type tester func(t *testing.T, chk *MemChunk) - - tests := map[string]tester{ - "append out of order in the same block": func(t *testing.T, chk *MemChunk) { - assert.NoError(t, chk.Append(logprotoEntry(5, "test"))) - assert.NoError(t, chk.Append(logprotoEntry(6, "test"))) - - assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error()) - }, - "append out of order in a new block right after cutting the previous one": func(t *testing.T, chk *MemChunk) { - assert.NoError(t, chk.Append(logprotoEntry(5, "test"))) - assert.NoError(t, chk.Append(logprotoEntry(6, "test"))) - assert.NoError(t, chk.cut()) - - assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error()) - }, - "append out of order in a new block after multiple cuts": func(t *testing.T, chk *MemChunk) { - assert.NoError(t, chk.Append(logprotoEntry(5, "test"))) - assert.NoError(t, chk.cut()) - - assert.NoError(t, chk.Append(logprotoEntry(6, "test"))) - assert.NoError(t, chk.cut()) - - assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error()) - }, - } - - for testName, tester := range tests { - tester := tester - - t.Run(testName, func(t *testing.T) { - t.Parallel() - - tester(t, NewMemChunk(EncGZIP)) - }) - } -} - -var result []Chunk - -func BenchmarkWriteGZIP(b 
*testing.B) {
-	chunks := []Chunk{}
-
-	entry := &logproto.Entry{
-		Timestamp: time.Unix(0, 0),
-		Line:      RandString(512),
-	}
-	i := int64(0)
-
-	for n := 0; n < b.N; n++ {
-		c := NewMemChunk(EncGZIP)
-		// adds until full so we trigger cut which serialize using gzip
-		for c.SpaceFor(entry) {
-			_ = c.Append(entry)
-			entry.Timestamp = time.Unix(0, i)
-			i++
-		}
-		chunks = append(chunks, c)
-	}
-	result = chunks
-}
-
-func BenchmarkReadGZIP(b *testing.B) {
-	chunks := []Chunk{}
-	i := int64(0)
-	for n := 0; n < 50; n++ {
-		entry := randSizeEntry(0)
-		c := NewMemChunk(EncGZIP)
-		// adds until full so we trigger cut which serialize using gzip
-		for c.SpaceFor(entry) {
-			_ = c.Append(entry)
-			i++
-			entry = randSizeEntry(i)
-		}
-		c.Close()
-		chunks = append(chunks, c)
-	}
-	entries := []logproto.Entry{}
-	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
-		var wg sync.WaitGroup
-		for _, c := range chunks {
-			wg.Add(1)
-			go func(c Chunk) {
-				iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.BACKWARD, nil)
-				if err != nil {
-					panic(err)
-				}
-				for iterator.Next() {
-					entries = append(entries, iterator.Entry())
-				}
-				iterator.Close()
-				wg.Done()
-			}(c)
-		}
-		wg.Wait()
-	}
-}
-
-func BenchmarkHeadBlockIterator(b *testing.B) {
-
-	for _, j := range []int{100000, 50000, 15000, 10000} {
-		b.Run(fmt.Sprintf("Size %d", j), func(b *testing.B) {
-
-			h := headBlock{}
-
-			for i := 0; i < j; i++ {
-				if err := h.append(int64(i), "this is the append string"); err != nil {
-					b.Fatal(err)
-				}
-			}
-
-			b.ResetTimer()
-
-			for n := 0; n < b.N; n++ {
-				iter := h.iterator(0, math.MaxInt64, nil)
-
-				for iter.Next() {
-					_ = iter.Entry()
-				}
-			}
-		})
-	}
-}
-
-func randSizeEntry(ts int64) *logproto.Entry {
-	var line string
-	switch ts % 10 {
-	case 0:
-		line = RandString(27000)
-	case 1:
-		line = RandString(10000)
-	case 2, 3, 4, 5:
-		line = RandString(2048)
-	default:
-		line = RandString(4096)
-	}
-	return &logproto.Entry{
-		Timestamp: time.Unix(0, ts),
-		Line:      line,
-	}
-}
-
-const charset = "abcdefghijklmnopqrstuvwxyz" +
-	"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-
-func RandStringWithCharset(length int, charset string) string {
-	b := make([]byte, length)
-	for i := range b {
-		b[i] = charset[rand.Intn(len(charset)-1)]
-	}
-	return string(b)
-}
-
-func RandString(length int) string {
-	return RandStringWithCharset(length, charset)
-}
-
-func logprotoEntry(ts int64, line string) *logproto.Entry {
-	return &logproto.Entry{
-		Timestamp: time.Unix(0, ts),
-		Line:      line,
-	}
-}
diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go
index b9446b45a95e1..bec5966116b9d 100644
--- a/pkg/chunkenc/interface.go
+++ b/pkg/chunkenc/interface.go
@@ -2,7 +2,8 @@ package chunkenc
 
 import (
 	"errors"
-	"io"
+	"fmt"
+	"strings"
 	"time"
 
 	"github.com/grafana/loki/pkg/iter"
@@ -20,15 +21,23 @@ var (
 )
 
 // Encoding is the identifier for a chunk encoding.
-type Encoding uint8
+type Encoding byte
 
 // The different available encodings.
 const (
 	EncNone Encoding = iota
 	EncGZIP
 	EncDumb
+	EncLZ4
+	EncSnappy
 )
 
+var supportedEncoding = []Encoding{
+	EncGZIP,
+	EncLZ4,
+	EncSnappy,
+}
+
 func (e Encoding) String() string {
 	switch e {
 	case EncGZIP:
@@ -37,11 +46,38 @@ func (e Encoding) String() string {
 		return "gzip"
 	case EncNone:
 		return "none"
 	case EncDumb:
 		return "dumb"
+	case EncLZ4:
+		return "lz4"
+	case EncSnappy:
+		return "snappy"
 	default:
 		return "unknown"
 	}
 }
 
+// ParseEncoding parses a chunk encoding (compression algorithm) by its name.
+func ParseEncoding(enc string) (Encoding, error) {
+	for _, e := range supportedEncoding {
+		if strings.EqualFold(e.String(), enc) {
+			return e, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid encoding: %s, supported: %s", enc, SupportedEncoding())
+}
+
+// SupportedEncoding returns the list of supported Encodings.
+func SupportedEncoding() string {
+	var sb strings.Builder
+	for i := range supportedEncoding {
+		sb.WriteString(supportedEncoding[i].String())
+		if i != len(supportedEncoding)-1 {
+			sb.WriteString(", ")
+		}
+	}
+	return sb.String()
+}
+
 // Chunk is the interface for the compressed logs chunk format.
 type Chunk interface {
 	Bounds() (time.Time, time.Time)
@@ -56,17 +92,3 @@ type Chunk interface {
 	CompressedSize() int
 	Close() error
 }
-
-// CompressionWriter is the writer that compresses the data passed to it.
-type CompressionWriter interface {
-	Write(p []byte) (int, error)
-	Close() error
-	Flush() error
-	Reset(w io.Writer)
-}
-
-// CompressionReader reads the compressed data.
-type CompressionReader interface {
-	Read(p []byte) (int, error)
-	Reset(r io.Reader) error
-}
diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go
new file mode 100644
index 0000000000000..abb61a673eeb4
--- /dev/null
+++ b/pkg/chunkenc/interface_test.go
@@ -0,0 +1,26 @@
+package chunkenc
+
+import "testing"
+
+func TestParseEncoding(t *testing.T) {
+	tests := []struct {
+		enc     string
+		want    Encoding
+		wantErr bool
+	}{
+		{"gzip", EncGZIP, false},
+		{"bad", 0, true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.enc, func(t *testing.T) {
+			got, err := ParseEncoding(tt.enc)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ParseEncoding() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("ParseEncoding() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
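`ParseEncoding` is what the config plumbing can call to turn the `chunk_encoding` string into an `Encoding`, and the memchunk changes below then record that choice in the serialized header: a 4-byte big-endian magic number, one chunk-format version byte, and, for format v2, one byte identifying the block encoding. A minimal sketch of the round trip, written as a hypothetical standalone program against the in-repo packages (error handling mostly elided):

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/loki/pkg/chunkenc"
	"github.com/grafana/loki/pkg/logproto"
)

func main() {
	// "snappy" matches EncSnappy case-insensitively; an unknown name
	// returns an error listing SupportedEncoding().
	enc, err := chunkenc.ParseEncoding("snappy")
	if err != nil {
		panic(err)
	}

	c := chunkenc.NewMemChunk(enc)
	_ = c.Append(&logproto.Entry{Timestamp: time.Unix(0, 1), Line: "hello, world!"})
	_ = c.Close()

	b, _ := c.Bytes()
	// Format v2 header layout: b[0:4] magic number (big endian),
	// b[4] chunk format version, b[5] block encoding.
	fmt.Printf("version=%d encoding=%s\n", b[4], chunkenc.Encoding(b[5]))
}
```

Because the encoding now travels inside the chunk, `NewByteChunk` below no longer has to assume gzip: it reads the version byte and, for v2, the encoding byte, and picks the matching reader/writer pools.

diff --git a/pkg/chunkenc/gzip.go b/pkg/chunkenc/memchunk.go
similarity index 85%
rename from pkg/chunkenc/gzip.go
rename to pkg/chunkenc/memchunk.go
index 2132205c523ff..cbf10b577868c 100644
--- a/pkg/chunkenc/gzip.go
+++ b/pkg/chunkenc/memchunk.go
@@ -23,6 +23,7 @@ var (
 	magicNumber = uint32(0x12EE56A)
 
 	chunkFormatV1 = byte(1)
+	chunkFormatV2 = byte(2)
 )
 
 // The table gets initialized with sync.Once but may still cause a race
@@ -55,8 +56,12 @@ type MemChunk struct {
 	// Current in-mem block being appended to.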
	head *headBlock
 
+	// the chunk format, defaults to v2
+	format byte
 
 	encoding Encoding
-	cPool    CompressionPool
+
+	readers ReaderPool
+	writers WriterPool
 }
 
 type block struct {
@@ -99,32 +104,34 @@ func (hb *headBlock) append(ts int64, line string) error {
 	return nil
 }
 
-func (hb *headBlock) serialise(pool CompressionPool) ([]byte, error) {
-	buf := &bytes.Buffer{}
+func (hb *headBlock) serialise(pool WriterPool) ([]byte, error) {
+	inBuf := serializeBytesBufferPool.Get().(*bytes.Buffer)
+	outBuf := &bytes.Buffer{}
+
 	encBuf := make([]byte, binary.MaxVarintLen64)
-	compressedWriter := pool.GetWriter(buf)
+	compressedWriter := pool.GetWriter(outBuf)
 	for _, logEntry := range hb.entries {
 		n := binary.PutVarint(encBuf, logEntry.t)
-		_, err := compressedWriter.Write(encBuf[:n])
-		if err != nil {
-			return nil, errors.Wrap(err, "appending entry")
-		}
+		inBuf.Write(encBuf[:n])
 
 		n = binary.PutUvarint(encBuf, uint64(len(logEntry.s)))
-		_, err = compressedWriter.Write(encBuf[:n])
-		if err != nil {
-			return nil, errors.Wrap(err, "appending entry")
-		}
-		_, err = compressedWriter.Write([]byte(logEntry.s))
-		if err != nil {
-			return nil, errors.Wrap(err, "appending entry")
-		}
+		inBuf.Write(encBuf[:n])
+
+		inBuf.WriteString(logEntry.s)
+	}
+
+	if _, err := compressedWriter.Write(inBuf.Bytes()); err != nil {
+		return nil, errors.Wrap(err, "appending entry")
 	}
 
 	if err := compressedWriter.Close(); err != nil {
 		return nil, errors.Wrap(err, "flushing pending compress buffer")
 	}
+
+	inBuf.Reset()
+	serializeBytesBufferPool.Put(inBuf)
 	pool.PutWriter(compressedWriter)
 
-	return buf.Bytes(), nil
+	return outBuf.Bytes(), nil
 }
 
 type entry struct {
@@ -132,6 +139,11 @@ type entry struct {
 	s string
 }
 
+// NewMemChunk returns a new in-mem chunk for query.
+func NewMemChunk(enc Encoding) *MemChunk {
+	return NewMemChunkSize(enc, 256*1024, 0)
+}
+
 // NewMemChunkSize returns a new in-mem chunk.
 // Mainly for config push size.
 func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk {
@@ -140,34 +152,22 @@ func NewMemChunkSize(enc Encoding, blockSize, targetSize int) *MemChunk {
 		targetSize: targetSize, // Desired chunk size in compressed bytes
 		blocks:     []block{},
 
-		head: &headBlock{},
+		head:   &headBlock{},
+		format: chunkFormatV2,
 
 		encoding: enc,
-	}
-
-	switch enc {
-	case EncGZIP:
-		c.cPool = &Gzip
-	default:
-		panic("unknown encoding")
+		writers:  getWriterPool(enc),
+		readers:  getReaderPool(enc),
 	}
 
 	return c
 }
 
-// NewMemChunk returns a new in-mem chunk for query.
-func NewMemChunk(enc Encoding) *MemChunk {
-	return NewMemChunkSize(enc, 256*1024, 0)
-}
-
 // NewByteChunk returns a MemChunk on the passed bytes.
 func NewByteChunk(b []byte) (*MemChunk, error) {
 	bc := &MemChunk{
-		cPool:    &Gzip,
-		encoding: EncGZIP,
-		head:     &headBlock{}, // Dummy, empty headblock.
+		head: &headBlock{}, // Dummy, empty headblock.
 	}
-
 	db := decbuf{b: b}
 
 	// Verify the header.
@@ -178,7 +178,18 @@ func NewByteChunk(b []byte) (*MemChunk, error) {
 	if m != magicNumber {
 		return nil, errors.Errorf("invalid magic number %x", m)
 	}
-	if version != 1 {
+	bc.format = version
+	switch version {
+	case chunkFormatV1:
+		bc.readers, bc.writers = &Gzip, &Gzip
+	case chunkFormatV2:
+		// format v2 has a byte for block encoding.
+ enc := Encoding(db.byte()) + if db.err() != nil { + return nil, errors.Wrap(db.err(), "verifying encoding") + } + bc.readers, bc.writers = getReaderPool(enc), getWriterPool(enc) + default: return nil, errors.Errorf("invalid version %d", version) } @@ -242,7 +253,11 @@ func (c *MemChunk) Bytes() ([]byte, error) { // Write the header (magicNum + version). eb.putBE32(magicNumber) - eb.putByte(chunkFormatV1) + eb.putByte(c.format) + if c.format == chunkFormatV2 { + // chunk format v2 has a byte for encoding. + eb.putByte(byte(c.encoding)) + } n, err := buf.Write(eb.get()) if err != nil { @@ -401,7 +416,7 @@ func (c *MemChunk) cut() error { return nil } - b, err := c.head.serialise(c.cPool) + b, err := c.head.serialise(c.writers) if err != nil { return err } @@ -451,7 +466,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction for _, b := range c.blocks { if maxt > b.mint && b.maxt > mint { - its = append(its, b.iterator(c.cPool, filter)) + its = append(its, b.iterator(c.readers, filter)) } } @@ -472,7 +487,7 @@ func (c *MemChunk) Iterator(mintT, maxtT time.Time, direction logproto.Direction return iter.NewEntryIteratorBackward(iterForward) } -func (b block) iterator(pool CompressionPool, filter logql.Filter) iter.EntryIterator { +func (b block) iterator(pool ReaderPool, filter logql.Filter) iter.EntryIterator { if len(b.b) == 0 { return emptyIterator } @@ -537,9 +552,11 @@ func (li *listIterator) Close() error { return nil } func (li *listIterator) Labels() string { return "" } type bufferedIterator struct { - s *bufio.Reader - reader CompressionReader - pool CompressionPool + origBytes []byte + + bufReader *bufio.Reader + reader io.Reader + pool ReaderPool cur logproto.Entry @@ -553,18 +570,24 @@ type bufferedIterator struct { filter logql.Filter } -func newBufferedIterator(pool CompressionPool, b []byte, filter logql.Filter) *bufferedIterator { - r := pool.GetReader(bytes.NewBuffer(b)) +func newBufferedIterator(pool ReaderPool, b []byte, filter logql.Filter) *bufferedIterator { return &bufferedIterator{ - s: BufReaderPool.Get(r), - reader: r, - pool: pool, - filter: filter, - decBuf: make([]byte, binary.MaxVarintLen64), + origBytes: b, + reader: nil, // will be initialized later + bufReader: nil, // will be initialized later + pool: pool, + filter: filter, + decBuf: make([]byte, binary.MaxVarintLen64), } } func (si *bufferedIterator) Next() bool { + if !si.closed && si.reader == nil { + // initialize reader now, hopefully reusing one of the previous readers + si.reader = si.pool.GetReader(bytes.NewBuffer(si.origBytes)) + si.bufReader = BufReaderPool.Get(si.reader) + } + for { ts, line, ok := si.moveNext() if !ok { @@ -582,7 +605,7 @@ func (si *bufferedIterator) Next() bool { // moveNext moves the buffer to the next entry func (si *bufferedIterator) moveNext() (int64, []byte, bool) { - ts, err := binary.ReadVarint(si.s) + ts, err := binary.ReadVarint(si.bufReader) if err != nil { if err != io.EOF { si.err = err @@ -590,7 +613,7 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { return 0, nil, false } - l, err := binary.ReadUvarint(si.s) + l, err := binary.ReadUvarint(si.bufReader) if err != nil { if err != io.EOF { si.err = err @@ -612,13 +635,13 @@ func (si *bufferedIterator) moveNext() (int64, []byte, bool) { } // Then process reading the line. 
- n, err := si.s.Read(si.buf[:lineSize]) + n, err := si.bufReader.Read(si.buf[:lineSize]) if err != nil && err != io.EOF { si.err = err return 0, nil, false } for n < lineSize { - r, err := si.s.Read(si.buf[n:lineSize]) + r, err := si.bufReader.Read(si.buf[n:lineSize]) if err != nil { si.err = err return 0, nil, false @@ -638,11 +661,12 @@ func (si *bufferedIterator) Close() error { if !si.closed { si.closed = true si.pool.PutReader(si.reader) - BufReaderPool.Put(si.s) + BufReaderPool.Put(si.bufReader) if si.buf != nil { BytesBufferPool.Put(si.buf) } - si.s = nil + si.origBytes = nil + si.bufReader = nil si.buf = nil si.decBuf = nil si.reader = nil diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go new file mode 100644 index 0000000000000..433d478a5d070 --- /dev/null +++ b/pkg/chunkenc/memchunk_test.go @@ -0,0 +1,418 @@ +package chunkenc + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/dustin/go-humanize" + "github.com/grafana/loki/pkg/chunkenc/testdata" + "github.com/grafana/loki/pkg/logproto" + "github.com/stretchr/testify/require" +) + +var testEncoding = []Encoding{ + EncNone, + EncGZIP, + EncLZ4, + EncSnappy, +} + +func TestBlock(t *testing.T) { + for _, enc := range testEncoding { + t.Run(enc.String(), func(t *testing.T) { + chk := NewMemChunk(enc) + cases := []struct { + ts int64 + str string + cut bool + }{ + { + ts: 1, + str: "hello, world!", + }, + { + ts: 2, + str: "hello, world2!", + }, + { + ts: 3, + str: "hello, world3!", + }, + { + ts: 4, + str: "hello, world4!", + }, + { + ts: 5, + str: "hello, world5!", + }, + { + ts: 6, + str: "hello, world6!", + cut: true, + }, + { + ts: 7, + str: "hello, world7!", + }, + { + ts: 8, + str: "hello, worl\nd8!", + }, + { + ts: 8, + str: "hello, world 8, 2!", + }, + { + ts: 8, + str: "hello, world 8, 3!", + }, + { + ts: 9, + str: "", + }, + } + + for _, c := range cases { + require.NoError(t, chk.Append(logprotoEntry(c.ts, c.str))) + if c.cut { + require.NoError(t, chk.cut()) + } + } + + it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) + require.NoError(t, err) + + idx := 0 + for it.Next() { + e := it.Entry() + require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) + require.Equal(t, cases[idx].str, e.Line) + idx++ + } + + require.NoError(t, it.Error()) + require.Equal(t, len(cases), idx) + + t.Run("bounded-iteration", func(t *testing.T) { + it, err := chk.Iterator(time.Unix(0, 3), time.Unix(0, 7), logproto.FORWARD, nil) + require.NoError(t, err) + + idx := 2 + for it.Next() { + e := it.Entry() + require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano()) + require.Equal(t, cases[idx].str, e.Line) + idx++ + } + require.NoError(t, it.Error()) + require.Equal(t, 6, idx) + }) + }) + } +} + +func TestReadFormatV1(t *testing.T) { + c := NewMemChunk(EncGZIP) + fillChunk(c) + // overrides default v2 format + c.format = chunkFormatV1 + + b, err := c.Bytes() + if err != nil { + t.Fatal(err) + } + + r, err := NewByteChunk(b) + if err != nil { + t.Fatal(err) + } + + it, err := r.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil) + if err != nil { + t.Fatal(err) + } + + i := int64(0) + for it.Next() { + require.Equal(t, i, it.Entry().Timestamp.UnixNano()) + require.Equal(t, testdata.LogString(i), it.Entry().Line) + + i++ + } +} + +func TestSerialization(t *testing.T) { + for _, enc := range testEncoding { + t.Run(enc.String(), func(t *testing.T) { + chk := NewMemChunk(enc) + + 
numSamples := 500000
+
+			for i := 0; i < numSamples; i++ {
+				require.NoError(t, chk.Append(logprotoEntry(int64(i), string(i))))
+			}
+
+			byt, err := chk.Bytes()
+			require.NoError(t, err)
+
+			bc, err := NewByteChunk(byt)
+			require.NoError(t, err)
+
+			it, err := bc.Iterator(time.Unix(0, 0), time.Unix(0, math.MaxInt64), logproto.FORWARD, nil)
+			require.NoError(t, err)
+			for i := 0; i < numSamples; i++ {
+				require.True(t, it.Next())
+
+				e := it.Entry()
+				require.Equal(t, int64(i), e.Timestamp.UnixNano())
+				require.Equal(t, string(i), e.Line)
+			}
+
+			require.NoError(t, it.Error())
+
+			byt2, err := chk.Bytes()
+			require.NoError(t, err)
+
+			require.True(t, bytes.Equal(byt, byt2))
+		})
+	}
+}
+
+func TestChunkFilling(t *testing.T) {
+	for _, enc := range testEncoding {
+		t.Run(enc.String(), func(t *testing.T) {
+			chk := NewMemChunk(enc)
+			chk.blockSize = 1024
+
+			// We should be able to append only 10KB of logs.
+			maxBytes := chk.blockSize * blocksPerChunk
+			lineSize := 512
+			lines := maxBytes / lineSize
+
+			logLine := string(make([]byte, lineSize))
+			entry := &logproto.Entry{
+				Timestamp: time.Unix(0, 0),
+				Line:      logLine,
+			}
+
+			i := int64(0)
+			for ; chk.SpaceFor(entry) && i < 30; i++ {
+				entry.Timestamp = time.Unix(0, i)
+				require.NoError(t, chk.Append(entry))
+			}
+
+			require.Equal(t, int64(lines), i)
+
+			it, err := chk.Iterator(time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, nil)
+			require.NoError(t, err)
+			i = 0
+			for it.Next() {
+				entry := it.Entry()
+				require.Equal(t, i, entry.Timestamp.UnixNano())
+				i++
+			}
+
+			require.Equal(t, int64(lines), i)
+		})
+	}
+}
+
+func TestGZIPChunkTargetSize(t *testing.T) {
+	targetSize := 1024 * 1024
+	chk := NewMemChunkSize(EncGZIP, 1024, targetSize)
+
+	lineSize := 512
+	entry := &logproto.Entry{
+		Timestamp: time.Unix(0, 0),
+		Line:      "",
+	}
+
+	// Use a random number to generate random log data, otherwise the gzip compression is way too good
+	// and the following loop has to run way too many times
+	// Using the same seed should guarantee the same random numbers and same test data.
+	r := rand.New(rand.NewSource(99))
+
+	i := int64(0)
+
+	for ; chk.SpaceFor(entry) && i < 5000; i++ {
+		logLine := make([]byte, lineSize)
+		for j := range logLine {
+			logLine[j] = byte(r.Int())
+		}
+		entry = &logproto.Entry{
+			Timestamp: time.Unix(0, 0),
+			Line:      string(logLine),
+		}
+		entry.Timestamp = time.Unix(0, i)
+		require.NoError(t, chk.Append(entry))
+	}
+
+	// 5000 is a limit to make sure the test doesn't run away, we shouldn't need this many log lines to make a 1MB chunk
+	require.NotEqual(t, 5000, i)
+
+	require.NoError(t, chk.Close())
+
+	require.Equal(t, 0, chk.head.size)
+
+	// Even though the seed is static above and results should be deterministic,
+	// we will allow +/- 10% variance
+	minSize := int(float64(targetSize) * 0.9)
+	maxSize := int(float64(targetSize) * 1.1)
+	require.Greater(t, chk.CompressedSize(), minSize)
+	require.Less(t, chk.CompressedSize(), maxSize)
+
+	// Also verify our utilization is close to 1.0
+	ut := chk.Utilization()
+	require.Greater(t, ut, 0.99)
+	require.Less(t, ut, 1.01)
+}
+
+func TestMemChunk_AppendOutOfOrder(t *testing.T) {
+	t.Parallel()
+
+	type tester func(t *testing.T, chk *MemChunk)
+
+	tests := map[string]tester{
+		"append out of order in the same block": func(t *testing.T, chk *MemChunk) {
+			assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
+			assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+
+			assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+		},
+		"append out of order in a new block right after cutting the previous one": func(t *testing.T, chk *MemChunk) {
+			assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
+			assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+			assert.NoError(t, chk.cut())
+
+			assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+		},
+		"append out of order in a new block after multiple cuts": func(t *testing.T, chk *MemChunk) {
+			assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
+			assert.NoError(t, chk.cut())
+
+			assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+			assert.NoError(t, chk.cut())
+
+			assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+		},
+	}
+
+	for testName, tester := range tests {
+		tester := tester
+
+		t.Run(testName, func(t *testing.T) {
+			t.Parallel()
+
+			tester(t, NewMemChunk(EncGZIP))
+		})
+	}
+}
+
+func TestChunkSize(t *testing.T) {
+	for _, enc := range testEncoding {
+		t.Run(enc.String(), func(t *testing.T) {
+			c := NewMemChunk(enc)
+			inserted := fillChunk(c)
+			b, err := c.Bytes()
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Log("Chunk size", humanize.Bytes(uint64(len(b))))
+			t.Log("characters ", inserted)
+		})
+
+	}
+}
+
+var result []Chunk
+
+func BenchmarkWrite(b *testing.B) {
+	chunks := []Chunk{}
+
+	entry := &logproto.Entry{
+		Timestamp: time.Unix(0, 0),
+		Line:      testdata.LogString(0),
+	}
+	i := int64(0)
+
+	for _, enc := range testEncoding {
+		b.Run(enc.String(), func(b *testing.B) {
+			for n := 0; n < b.N; n++ {
+				c := NewMemChunk(enc)
+				// adds until full so we trigger cut, which serializes using the configured encoding
+				for c.SpaceFor(entry) {
+					_ = c.Append(entry)
+					entry.Timestamp = time.Unix(0, i)
+					entry.Line = testdata.LogString(i)
+					i++
+				}
+				chunks = append(chunks, c)
+			}
+			result = chunks
+		})
+	}
+
+}
+
+func BenchmarkRead(b *testing.B) {
+	for _, enc := range testEncoding {
+		b.Run(enc.String(), func(b *testing.B) {
+			chunks := generateData(enc)
+			b.ResetTimer()
+			bytesRead := int64(0)
+			now := time.Now()
+			for n := 0; n < b.N; n++ {
+				for _, c := range chunks {
+					// use forward iterator for benchmark -- backward iterator does extra allocations by keeping entries in memory
+					iterator, err := c.Iterator(time.Unix(0, 0), time.Now(), logproto.FORWARD, nil)
+					if err != nil {
+						panic(err)
+					}
+					for iterator.Next() {
+						e := iterator.Entry()
+						bytesRead += int64(len(e.Line))
+					}
+					if err := iterator.Close(); err != nil {
+						b.Fatal(err)
+					}
+				}
+			}
+			b.Log("bytes per second ", humanize.Bytes(uint64(float64(bytesRead)/time.Since(now).Seconds())))
+			b.Log("n=", b.N)
+		})
+	}
+}
+
+func BenchmarkHeadBlockIterator(b *testing.B) {
+
+	for _, j := range []int{100000, 50000, 15000, 10000} {
+		b.Run(fmt.Sprintf("Size %d", j), func(b *testing.B) {
+
+			h := headBlock{}
+
+			for i := 0; i < j; i++ {
+				if err := h.append(int64(i), "this is the append string"); err != nil {
+					b.Fatal(err)
+				}
+			}
+
+			b.ResetTimer()
+
+			for n := 0; n < b.N; n++ {
+				iter := h.iterator(0, math.MaxInt64, nil)
+
+				for iter.Next() {
+					_ = iter.Entry()
+				}
+			}
+		})
+	}
+}
diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go
index 12a9e9df61904..51461d44fcbfe 100644
--- a/pkg/chunkenc/pool.go
+++ b/pkg/chunkenc/pool.go
@@ -2,25 +2,39 @@ package chunkenc
 
 import (
 	"bufio"
+	"bytes"
 	"io"
 	"sync"
 
+	"github.com/golang/snappy"
 	"github.com/klauspost/compress/gzip"
+	"github.com/pierrec/lz4"
 	"github.com/prometheus/prometheus/pkg/pool"
 )
 
-// CompressionPool is a pool of CompressionWriter and CompressionReader
+// WriterPool is a pool of compression writers (io.WriteCloser).
 // This is used by every chunk to avoid unnecessary allocations.
-type CompressionPool interface {
-	GetWriter(io.Writer) CompressionWriter
-	PutWriter(CompressionWriter)
-	GetReader(io.Reader) CompressionReader
-	PutReader(CompressionReader)
+type WriterPool interface {
+	GetWriter(io.Writer) io.WriteCloser
+	PutWriter(io.WriteCloser)
+}
+
+// ReaderPool is similar to WriterPool but for reading chunks.
+type ReaderPool interface {
+	GetReader(io.Reader) io.Reader
+	PutReader(io.Reader)
 }
 
 var (
 	// Gzip is the gzip compression pool
-	Gzip GzipPool
+	Gzip = GzipPool{level: gzip.DefaultCompression}
+	// LZ4 is the lz4 compression pool
+	LZ4 LZ4Pool
+	// Snappy is the snappy compression pool
+	Snappy SnappyPool
+	// Noop is the no-compression pool
+	Noop NoopPool
+
 	// BufReaderPool is bufio.Reader pool
 	BufReaderPool = &BufioReaderPool{
 		pool: sync.Pool{
@@ -29,54 +43,180 @@ var (
 	}
 
 	// BytesBufferPool is a bytes buffer used for lines decompressed.
	// Buckets [0.5KB,1KB,2KB,4KB,8KB]
-	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
+	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
+
+	serializeBytesBufferPool = sync.Pool{
+		New: func() interface{} {
+			return &bytes.Buffer{}
+		},
+	}
 )
 
+func getWriterPool(enc Encoding) WriterPool {
+	return getReaderPool(enc).(WriterPool)
+}
+
+func getReaderPool(enc Encoding) ReaderPool {
+	switch enc {
+	case EncGZIP:
+		return &Gzip
+	case EncLZ4:
+		return &LZ4
+	case EncSnappy:
+		return &Snappy
+	case EncNone:
+		return &Noop
+	default:
+		panic("unknown encoding")
+	}
+}
+
 // GzipPool is a gzip compression pool
 type GzipPool struct {
 	readers sync.Pool
 	writers sync.Pool
+	level   int
 }
 
 // GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *GzipPool) GetReader(src io.Reader) (reader CompressionReader) {
+func (pool *GzipPool) GetReader(src io.Reader) io.Reader {
 	if r := pool.readers.Get(); r != nil {
-		reader = r.(CompressionReader)
+		reader := r.(*gzip.Reader)
 		err := reader.Reset(src)
 		if err != nil {
 			panic(err)
 		}
-	} else {
-		var err error
-		reader, err = gzip.NewReader(src)
-		if err != nil {
-			panic(err)
-		}
+		return reader
+	}
+	reader, err := gzip.NewReader(src)
+	if err != nil {
+		panic(err)
 	}
 	return reader
 }
 
 // PutReader places back in the pool a CompressionReader
-func (pool *GzipPool) PutReader(reader CompressionReader) {
+func (pool *GzipPool) PutReader(reader io.Reader) {
 	pool.readers.Put(reader)
 }
 
 // GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *GzipPool) GetWriter(dst io.Writer) (writer CompressionWriter) {
+func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser {
 	if w := pool.writers.Get(); w != nil {
-		writer = w.(CompressionWriter)
+		writer := w.(*gzip.Writer)
 		writer.Reset(dst)
-	} else {
-		writer = gzip.NewWriter(dst)
+		return writer
+	}
+
+	level := pool.level
+	if level == 0 {
+		level = gzip.DefaultCompression
+	}
+	w, err := gzip.NewWriterLevel(dst, level)
+	if err != nil {
+		panic(err) // never happens, error is only returned on wrong compression level.
+	}
+	return w
 }
 
 // PutWriter places back in the pool a CompressionWriter
-func (pool *GzipPool) PutWriter(writer CompressionWriter) {
+func (pool *GzipPool) PutWriter(writer io.WriteCloser) {
 	pool.writers.Put(writer)
 }
 
+// LZ4Pool is an lz4 compression pool
+type LZ4Pool struct {
+	readers sync.Pool
+	writers sync.Pool
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader {
+	if r := pool.readers.Get(); r != nil {
+		reader := r.(*lz4.Reader)
+		reader.Reset(src)
+		return reader
+	}
+	return lz4.NewReader(src)
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *LZ4Pool) PutReader(reader io.Reader) {
+	pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser {
+	if w := pool.writers.Get(); w != nil {
+		writer := w.(*lz4.Writer)
+		writer.Reset(dst)
+		return writer
+	}
+	return lz4.NewWriter(dst)
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *LZ4Pool) PutWriter(writer io.WriteCloser) {
+	pool.writers.Put(writer)
+}
+
+// SnappyPool is a snappy compression pool
+type SnappyPool struct {
+	readers sync.Pool
+	writers sync.Pool
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *SnappyPool) GetReader(src io.Reader) io.Reader {
+	if r := pool.readers.Get(); r != nil {
+		reader := r.(*snappy.Reader)
+		reader.Reset(src)
+		return reader
+	}
+	return snappy.NewReader(src)
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *SnappyPool) PutReader(reader io.Reader) {
+	pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser {
+	if w := pool.writers.Get(); w != nil {
+		writer := w.(*snappy.Writer)
+		writer.Reset(dst)
+		return writer
+	}
+	return snappy.NewBufferedWriter(dst)
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *SnappyPool) PutWriter(writer io.WriteCloser) {
+	pool.writers.Put(writer)
+}
+
+// NoopPool is the pool used for EncNone; it hands back the underlying
+// reader/writer unchanged instead of wrapping it in a compressor.
+type NoopPool struct{}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *NoopPool) GetReader(src io.Reader) io.Reader {
+	return src
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *NoopPool) PutReader(reader io.Reader) {}
+
+type noopCloser struct {
+	io.Writer
+}
+
+func (noopCloser) Close() error { return nil }
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser {
+	return noopCloser{dst}
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *NoopPool) PutWriter(writer io.WriteCloser) {}
+
 // BufioReaderPool is a bufio reader that uses sync.Pool.
 type BufioReaderPool struct {
 	pool sync.Pool
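For context on how these pools are meant to be used (mirroring what `serialise` and `newBufferedIterator` above do): borrow a writer, `Close` it to flush the compressed frame, return it to the pool, then read the result back through a pooled reader. A small hypothetical sketch against the exported `Snappy` pool (any of the other pools would work the same way):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/grafana/loki/pkg/chunkenc"
)

func main() {
	var compressed bytes.Buffer

	// Borrow a snappy writer, compress one payload, flush and return it.
	w := chunkenc.Snappy.GetWriter(&compressed)
	if _, err := w.Write([]byte("hello, world!")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes the buffered frame
		panic(err)
	}
	chunkenc.Snappy.PutWriter(w)

	// Borrow a reader for the round trip and return it when done.
	r := chunkenc.Snappy.GetReader(&compressed)
	out, err := ioutil.ReadAll(r)
	chunkenc.Snappy.PutReader(r)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(out)) // hello, world!
}
```

diff --git a/pkg/chunkenc/testdata/testdata.go b/pkg/chunkenc/testdata/testdata.go
new file mode 100644
index 0000000000000..265ca4d64052a
--- /dev/null
+++ b/pkg/chunkenc/testdata/testdata.go
@@ -0,0 +1,1013 @@
+package testdata
+
+import "strings"
+
+// LogString returns a test log line. Returns the same line for the same index.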
+func LogString(index int64) string { + if index > int64(len(logs)-1) { + index = index % int64(len(logs)) + } + return logs[index] +} + +var logs = strings.Split(`level=info ts=2019-12-12T15:00:08.325Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHNM71GRCJS7M34Q0EV7 sources="[01DVWNC6NWY1A60AZV3Z6DGS65 01DVWW7XXX75GHA6ZDTD170CSZ 01DVX33N5W86CWJJVRPAVXJRWJ]" duration=2.897213221s +level=info ts=2019-12-12T15:00:08.296Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576130400000 maxt=1576152000000 ulid=01DVX9ZHQRVN42AF196NYJ9C4C sources="[01DVWNC6NSPJRCSBZ4QD3SXS66 01DVWW7XY69Y4YT09HR0RSR8KY 01DVX33N5SMVPB1TMD9J1M8GGK]" duration=2.800759388s +level=info ts=2019-12-12T15:00:05.285Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1037 last=1039 duration=3.030078405s +level=info ts=2019-12-12T15:00:05.225Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1037 last=1039 duration=3.019791992s +level=info ts=2019-12-12T15:00:02.255Z caller=head.go:596 component=tsdb msg="head GC completed" duration=125.980176ms +level=info ts=2019-12-12T15:00:02.206Z caller=head.go:596 component=tsdb msg="head GC completed" duration=127.111334ms +level=info ts=2019-12-12T15:00:01.874Z caller=compact.go:496 component=tsdb msg="write block" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCE8WZCTQJWSYDGHVQV8 duration=1.801853505s +level=info ts=2019-12-12T15:00:01.854Z caller=compact.go:496 component=tsdb msg="write block" mint=1576152000000 maxt=1576159200000 ulid=01DVX9ZCDWEBXRYWA7585TN2RV duration=1.794588392s +level=info ts=2019-12-12T13:00:05.461Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1034 last=1036 duration=3.044019343s +level=info ts=2019-12-12T13:00:05.332Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1034 last=1036 duration=3.040243488s +level=info ts=2019-12-12T13:00:02.417Z caller=head.go:596 component=tsdb msg="head GC completed" duration=128.883109ms +level=info ts=2019-12-12T13:00:02.291Z caller=head.go:596 component=tsdb msg="head GC completed" duration=126.278558ms +level=info ts=2019-12-12T13:00:02.048Z caller=compact.go:496 component=tsdb msg="write block" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5W86CWJJVRPAVXJRWJ duration=1.987867109s +level=info ts=2019-12-12T13:00:01.914Z caller=compact.go:496 component=tsdb msg="write block" mint=1576144800000 maxt=1576152000000 ulid=01DVX33N5SMVPB1TMD9J1M8GGK duration=1.856432758s +level=info ts=2019-12-12T12:58:04.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=3 to=4 +level=info ts=2019-12-12T12:52:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=4 to=3 +level=info ts=2019-12-12T11:00:05.320Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1031 last=1033 duration=2.999621843s +level=info ts=2019-12-12T11:00:05.315Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1031 last=1033 duration=2.962560692s +level=info ts=2019-12-12T11:00:02.352Z caller=head.go:596 component=tsdb msg="head GC completed" duration=131.600701ms +level=info ts=2019-12-12T11:00:02.321Z caller=head.go:596 component=tsdb msg="head GC completed" duration=134.547131ms +level=info ts=2019-12-12T11:00:01.975Z caller=compact.go:496 component=tsdb msg="write 
block" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XY69Y4YT09HR0RSR8KY duration=1.905948839s +level=info ts=2019-12-12T11:00:01.889Z caller=compact.go:496 component=tsdb msg="write block" mint=1576137600000 maxt=1576144800000 ulid=01DVWW7XXX75GHA6ZDTD170CSZ duration=1.828298188s +level=info ts=2019-12-12T10:55:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=3 to=4 +level=info ts=2019-12-12T10:49:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=4 to=3 +level=info ts=2019-12-12T10:33:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=3 to=4 +level=info ts=2019-12-12T10:25:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=4 to=3 +level=info ts=2019-12-12T10:21:24.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=3 to=4 +level=info ts=2019-12-12T10:14:14.793Z caller=queue_manager.go:559 component=remote queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=4 to=3 +level=info ts=2019-12-12T09:00:16.465Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCFJRNW4RP8C56D4QNRXH sources="[01DVVC60FYTRXZ9457XT10Y7AH 01DVW0S6A5HFTYBYD34SGAZJSR 01DVWNCC9SYJDQP0Y2RXK8XJC9]" duration=7.289011992s +level=info ts=2019-12-12T09:00:15.812Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576065600000 maxt=1576130400000 ulid=01DVWNCF9JNSMSKZHW8STXQARA sources="[01DVVC60DBGMXD5DXR6Y5XWNXF 01DVW0S67R7JFBFTFWMNVS8YR3 01DVWNCC599NDRZWRRSZF4XGHF]" duration=6.930550254s +level=info ts=2019-12-12T09:00:08.717Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC9SYJDQP0Y2RXK8XJC9 sources="[01DVW0S0XW63CVRA3EPRSC8NWQ 01DVW7MR5W18322RVFY6WM9GR2 01DVWEGFDW0C09KSCRQ2F8DGN3]" duration=2.900180235s +level=info ts=2019-12-12T09:00:08.440Z caller=compact.go:441 component=tsdb msg="compact blocks" count=3 mint=1576108800000 maxt=1576130400000 ulid=01DVWNCC599NDRZWRRSZF4XGHF sources="[01DVW0S0XS1SQQQK3CQYCHN9HV 01DVW7MR5ZN3K38ZHBJ243HDZJ 01DVWEGFE0DGKKDG4V9AGAPPBQ]" duration=2.767053211s +level=info ts=2019-12-12T09:00:05.604Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1028 last=1030 duration=2.998418095s +level=info ts=2019-12-12T09:00:05.470Z caller=head.go:666 component=tsdb msg="WAL checkpoint complete" first=1028 last=1030 duration=3.008684806s +level=info ts=2019-12-12T09:00:02.606Z caller=head.go:596 component=tsdb msg="head GC completed" duration=126.82085ms +level=info ts=2019-12-12T09:00:02.461Z caller=head.go:596 component=tsdb msg="head GC completed" duration=127.770206ms +level=info ts=2019-12-12T09:00:01.995Z caller=compact.go:496 component=tsdb msg="write block" mint=1576130400000 maxt=1576137600000 ulid=01DVWNC6NWY1A60AZV3Z6DGS65 duration=1.934602237s +level=info ts=2019-12-12T09:00:01.960Z caller=compact.go:496 component=tsdb msg="write block" mint=1576130400000 maxt=1576137600000 ulid=01DVWNC6NSPJRCSBZ4QD3SXS66 duration=1.902822647s +level=info ts=2019-12-12T08:59:54.793Z caller=queue_manager.go:559 component=remote 
queue=1:https://ops-us-east4.grafana.net/api/prom/push msg="Remote storage resharding" from=3 to=4
[testdata log fixture, 2019-12-10 through 2019-12-12: several hundred added Prometheus log lines of the same shape as the entry above, one `+` diff line per log entry. Recurring entries: TSDB "write block", "compact blocks", "head GC completed", and "WAL checkpoint complete" events; remote-write "Remote storage resharding" events, including one storm scaling from=5 to=85 and back down through 14, 10, 7, and 5 shards; configmap-reload sidecar DEBUG output around two reloads of /etc/prometheus/prometheus.yml; and repeated remote-write "non-recoverable error" lines for HTTP 400 "out of order sample" and HTTP 429 "ingestion rate limit (200000) exceeded" responses.]
+level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" +level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=68 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 68 samples" +level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=35 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 35 samples" +level=error ts=2019-12-10T11:38:09.897Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:09.895Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:09.876Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=92 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 92 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=85 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 85 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.840Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.832Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.825Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=17 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 17 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.824Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" +level=error ts=2019-12-10T11:38:09.806Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" +level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.736Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=48 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 48 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=43 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 43 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push 
msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.735Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=19 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 19 samples" +level=error ts=2019-12-10T11:38:09.733Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" +level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=50 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 50 samples" +level=error ts=2019-12-10T11:38:09.700Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" +level=error ts=2019-12-10T11:38:08.797Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP 
status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" +level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=37 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 37 samples" +level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=67 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 67 samples" +level=error ts=2019-12-10T11:38:08.796Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" +level=error ts=2019-12-10T11:38:08.795Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" +level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" +level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" +level=error ts=2019-12-10T11:38:08.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=77 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 77 samples" +level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded 
while adding 100 samples" +level=error ts=2019-12-10T11:38:08.694Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.693Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=58 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 58 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=59 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 59 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.692Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.691Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error 
ts=2019-12-10T11:38:08.678Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" +level=error ts=2019-12-10T11:38:08.677Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.648Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.647Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.644Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" +level=error ts=2019-12-10T11:38:08.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" +level=error ts=2019-12-10T11:38:08.596Z caller=queue_manager.go:770 
component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" +level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.553Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=95 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 95 samples" +level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=73 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 73 samples" +level=error ts=2019-12-10T11:38:08.552Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=87 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples" +level=error ts=2019-12-10T11:38:08.541Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" +level=error ts=2019-12-10T11:38:08.501Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.497Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.447Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.446Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" +level=error ts=2019-12-10T11:38:08.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=65 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 65 samples" +level=error ts=2019-12-10T11:38:08.441Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" +level=error ts=2019-12-10T11:38:08.433Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.431Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=89 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 89 samples" +level=error ts=2019-12-10T11:38:08.407Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" +level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.394Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" +level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=90 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 90 samples" +level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:08.393Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.604Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.603Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=60 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples" +level=error ts=2019-12-10T11:38:07.602Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=75 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 75 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=71 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 71 samples" +level=error ts=2019-12-10T11:38:07.601Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 96 samples" +level=error ts=2019-12-10T11:38:07.600Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=93 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 93 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=72 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 72 samples" +level=error ts=2019-12-10T11:38:07.599Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.598Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.597Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=70 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 70 samples" +level=error ts=2019-12-10T11:38:07.596Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.595Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:07.594Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=91 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 91 samples"
[further near-identical caller=queue_manager.go:770 "429 Too Many Requests: ingestion rate limit (200000) exceeded" entries elided; only ts and count vary]
+level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push
msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" +level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:06.445Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=86 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 86 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.355Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.353Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned 
HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.350Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.349Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=60 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 60 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=22 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 22 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit 
(200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.348Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.347Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=82 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 82 samples" +level=error ts=2019-12-10T11:38:06.346Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.340Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error 
ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=74 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 74 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.339Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.338Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.337Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.330Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.329Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 
component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.328Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:06.327Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples" +level=error ts=2019-12-10T11:38:06.244Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.231Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.230Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.229Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=94 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 94 samples" +level=error ts=2019-12-10T11:38:06.206Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.205Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.198Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.092Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=81 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 81 samples" +level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.091Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.088Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:06.087Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:05.593Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=97 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 97 samples" +level=error ts=2019-12-10T11:38:05.592Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=info ts=2019-12-10T11:38:05.063Z caller=queue_manager.go:559 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="Remote storage resharding" from=1 to=2 +level=error ts=2019-12-10T11:38:04.977Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.976Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=96 err="server returned HTTP status 429 Too Many Requests: 
ingestion rate limit (200000) exceeded while adding 96 samples" +level=error ts=2019-12-10T11:38:04.821Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.815Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.720Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=98 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 98 samples" +level=error ts=2019-12-10T11:38:04.719Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.717Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=87 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 87 samples" +level=error ts=2019-12-10T11:38:04.716Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 
samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=99 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 99 samples" +level=error ts=2019-12-10T11:38:04.711Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.710Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=66 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 66 samples" +level=error ts=2019-12-10T11:38:04.709Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=64 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 64 samples" +level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" +level=error ts=2019-12-10T11:38:04.631Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.630Z 
caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.630Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.627Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.626Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.621Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.620Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.619Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.136Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.135Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.018Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.017Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.016Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.015Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.013Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=80 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 80 samples" +level=error ts=2019-12-10T11:38:04.012Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.010Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.009Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.008Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=88 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 88 samples" +level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.007Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.005Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.004Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=84 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 84 samples" +level=error ts=2019-12-10T11:38:04.003Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.002Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples" +level=error ts=2019-12-10T11:38:04.001Z caller=queue_manager.go:770 component=remote 
queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"
+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"
+level=error ts=2019-12-10T11:38:03.923Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=79 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 79 samples"
+level=error ts=2019-12-10T11:38:03.904Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=83 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 83 samples"
+level=error ts=2019-12-10T11:38:03.722Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=76 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 76 samples"
+level=error ts=2019-12-10T11:38:03.640Z caller=queue_manager.go:770 component=remote queue=0:http://cortex-gw.cortex-tsdb-dev.svc.cluster.local/api/prom/push msg="non-recoverable error" count=100 err="server returned HTTP status 429 Too Many Requests: ingestion rate limit (200000) exceeded while adding 100 samples"
+`, "\n")
diff --git a/pkg/chunkenc/util_test.go b/pkg/chunkenc/util_test.go
new file mode 100644
index 0000000000000..f906bd93442b4
--- /dev/null
+++ b/pkg/chunkenc/util_test.go
@@ -0,0 +1,53 @@
+package chunkenc
+
+import (
+    "time"
+
+    "github.com/grafana/loki/pkg/chunkenc/testdata"
+    "github.com/grafana/loki/pkg/logproto"
+)
+
+func logprotoEntry(ts int64, line string) *logproto.Entry {
+    return &logproto.Entry{
+        Timestamp: time.Unix(0, ts),
+        Line:      line,
+    }
+}
+
+func generateData(enc Encoding) []Chunk {
+    chunks := []Chunk{}
+    i := int64(0)
+    for n := 0; n < 50; n++ {
+        entry := logprotoEntry(0, testdata.LogString(0))
+        c := NewMemChunk(enc)
+        for c.SpaceFor(entry) {
+            _ = c.Append(entry)
+            i++
+            entry = logprotoEntry(i, testdata.LogString(i))
+        }
+        c.Close()
+        chunks = append(chunks, c)
+    }
+    return chunks
+}
+
+func fillChunk(c Chunk) int64 {
+    i := int64(0)
+    inserted := int64(0)
+    entry := &logproto.Entry{
+        Timestamp: time.Unix(0, 0),
+        Line:      testdata.LogString(i),
+    }
+    for c.SpaceFor(entry) {
+        err := c.Append(entry)
+        if err != nil {
+            panic(err)
+        }
+        i++
+        inserted += int64(len(entry.Line))
+        entry.Timestamp = time.Unix(0, i)
+        entry.Line = testdata.LogString(i)
+    }
+    return inserted
+}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index ccf2aee0f25ac..53bc7a0f2bca3 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -4,6 +4,7 @@ import (
     "context"
     "errors"
     "flag"
+    "fmt"
     "net/http"
     "sync"
     "time"
@@ -18,6 +19,7 @@ import (
     "github.com/cortexproject/cortex/pkg/ring"
     "github.com/cortexproject/cortex/pkg/util"
 
+    "github.com/grafana/loki/pkg/chunkenc"
     "github.com/grafana/loki/pkg/ingester/client"
     "github.com/grafana/loki/pkg/logproto"
     "github.com/grafana/loki/pkg/util/validation"
@@ -48,6 +50,7 @@ type Config struct {
     MaxChunkIdle    time.Duration `yaml:"chunk_idle_period"`
     BlockSize       int           `yaml:"chunk_block_size"`
     TargetChunkSize int           `yaml:"chunk_target_size"`
+    ChunkEncoding   string        `yaml:"chunk_encoding"`
 
     // For testing, you can override the address and ID of this ingester.
     ingesterClientFactory func(cfg client.Config, addr string) (grpc_health_v1.HealthClient, error)
@@ -65,6 +68,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
     f.DurationVar(&cfg.MaxChunkIdle, "ingester.chunks-idle-period", 30*time.Minute, "")
     f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "")
     f.IntVar(&cfg.TargetChunkSize, "ingester.chunk-target-size", 0, "")
+    f.StringVar(&cfg.ChunkEncoding, "ingester.chunk-encoding", chunkenc.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunks. (%s)", chunkenc.SupportedEncoding()))
 }
 
 // Ingester builds chunks for incoming log streams.
@@ -89,7 +93,8 @@ type Ingester struct {
     flushQueues     []*util.PriorityQueue
     flushQueuesDone sync.WaitGroup
 
-    limits *validation.Overrides
+    limits  *validation.Overrides
+    factory func() chunkenc.Chunk
 }
 
 // ChunkStore is the interface we need to store chunks.
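The new `chunk_encoding` option is read once at startup and turned into a chunk factory shared by the whole ingester, so every chunk a stream cuts uses the same encoding, block size, and target size. A minimal sketch of that wiring, not part of this diff: `ParseEncoding` and `NewMemChunkSize` are the helpers this change relies on, and the `main` scaffolding is illustrative only.

```go
// Sketch only: how the chunk_encoding setting becomes a chunk factory.
package main

import (
	"fmt"
	"log"

	"github.com/grafana/loki/pkg/chunkenc"
)

func main() {
	// e.g. the value of -ingester.chunk-encoding / chunk_encoding.
	enc, err := chunkenc.ParseEncoding("lz4")
	if err != nil {
		log.Fatal(err) // unknown encodings are rejected at startup
	}

	// One factory per ingester; every new chunk inherits the encoding,
	// block size and target size decided at startup.
	factory := func() chunkenc.Chunk {
		return chunkenc.NewMemChunkSize(enc, 256*1024, 0)
	}
	_ = factory()

	fmt.Printf("chunks will be created with %s compression\n", enc)
}
```

Keeping the parsed encoding out of `stream` and `instance` means a bad value fails fast in `New` instead of surfacing on the first push.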
@@ -102,6 +107,10 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
     if cfg.ingesterClientFactory == nil {
         cfg.ingesterClientFactory = client.New
     }
+    enc, err := chunkenc.ParseEncoding(cfg.ChunkEncoding)
+    if err != nil {
+        return nil, err
+    }
 
     i := &Ingester{
         cfg: cfg,
@@ -112,6 +121,9 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
         flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
         quitting:    make(chan struct{}),
         limits:      limits,
+        factory: func() chunkenc.Chunk {
+            return chunkenc.NewMemChunkSize(enc, cfg.BlockSize, cfg.TargetChunkSize)
+        },
     }
 
     i.flushQueuesDone.Add(cfg.ConcurrentFlushes)
@@ -120,7 +132,6 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
         go i.flushLoop(j)
     }
 
-    var err error
     i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester")
     if err != nil {
         return nil, err
@@ -191,7 +202,7 @@ func (i *Ingester) getOrCreateInstance(instanceID string) *instance {
     defer i.instancesMtx.Unlock()
     inst, ok = i.instances[instanceID]
     if !ok {
-        inst = newInstance(instanceID, i.cfg.BlockSize, i.cfg.TargetChunkSize, i.limits)
+        inst = newInstance(instanceID, i.factory, i.limits)
         i.instances[instanceID] = inst
     }
     return inst
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index f7a295e515189..a9d4d6e78dbae 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -16,6 +16,7 @@ import (
     "github.com/cortexproject/cortex/pkg/ingester/index"
     cutil "github.com/cortexproject/cortex/pkg/util"
 
+    "github.com/grafana/loki/pkg/chunkenc"
     "github.com/grafana/loki/pkg/helpers"
     "github.com/grafana/loki/pkg/iter"
     "github.com/grafana/loki/pkg/logproto"
@@ -60,15 +61,14 @@ type instance struct {
     streamsCreatedTotal prometheus.Counter
     streamsRemovedTotal prometheus.Counter
 
-    blockSize       int
-    targetChunkSize int // Compressed bytes
-    tailers         map[uint32]*tailer
-    tailerMtx       sync.RWMutex
+    tailers   map[uint32]*tailer
+    tailerMtx sync.RWMutex
 
-    limits *validation.Overrides
+    limits  *validation.Overrides
+    factory func() chunkenc.Chunk
 }
 
-func newInstance(instanceID string, blockSize, targetChunkSize int, limits *validation.Overrides) *instance {
+func newInstance(instanceID string, factory func() chunkenc.Chunk, limits *validation.Overrides) *instance {
     i := &instance{
         streams: map[model.Fingerprint]*stream{},
         index:   index.New(),
@@ -77,10 +77,9 @@ func newInstance(instanceID string, blockSize, targetChunkSize int, limits *vali
         streamsCreatedTotal: streamsCreatedTotal.WithLabelValues(instanceID),
         streamsRemovedTotal: streamsRemovedTotal.WithLabelValues(instanceID),
 
-        blockSize:       blockSize,
-        targetChunkSize: targetChunkSize,
-        tailers:         map[uint32]*tailer{},
-        limits:          limits,
+        factory: factory,
+        tailers: map[uint32]*tailer{},
+        limits:  limits,
     }
     i.mapper = newFPMapper(i.getLabelsFromFingerprint)
     return i
@@ -98,7 +97,7 @@ func (i *instance) consumeChunk(ctx context.Context, labels []client.LabelAdapte
     stream, ok := i.streams[fp]
     if !ok {
         sortedLabels := i.index.Add(labels, fp)
-        stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize)
+        stream = newStream(fp, sortedLabels, i.factory)
         i.streams[fp] = stream
         i.streamsCreatedTotal.Inc()
         memoryStreams.Inc()
@@ -156,7 +155,7 @@ func (i *instance) getOrCreateStream(labels []client.LabelAdapter) (*stream, err
         return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "per-user streams limit (%d) exceeded", i.limits.MaxStreamsPerUser(i.instanceID))
     }
     sortedLabels := i.index.Add(labels, fp)
-    stream = newStream(fp, sortedLabels, i.blockSize, i.targetChunkSize)
+    stream = newStream(fp, sortedLabels, i.factory)
     i.streams[fp] = stream
     memoryStreams.Inc()
     i.streamsCreatedTotal.Inc()
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index c25b841efe6c5..39daee96374f2 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -10,6 +10,7 @@ import (
     "github.com/prometheus/prometheus/pkg/labels"
 
+    "github.com/grafana/loki/pkg/chunkenc"
     "github.com/grafana/loki/pkg/logproto"
 
     "github.com/stretchr/testify/require"
@@ -17,11 +18,15 @@ import (
     "github.com/grafana/loki/pkg/util/validation"
 )
 
+var defaultFactory = func() chunkenc.Chunk {
+    return chunkenc.NewMemChunkSize(chunkenc.EncGZIP, 512, 0)
+}
+
 func TestLabelsCollisions(t *testing.T) {
     o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000})
     require.NoError(t, err)
 
-    i := newInstance("test", 512, 0, o)
+    i := newInstance("test", defaultFactory, o)
 
     // avoid entries from the future.
     tt := time.Now().Add(-5 * time.Minute)
@@ -47,7 +52,7 @@ func TestConcurrentPushes(t *testing.T) {
     o, err := validation.NewOverrides(validation.Limits{MaxStreamsPerUser: 1000})
     require.NoError(t, err)
 
-    inst := newInstance("test", 512, 0, o)
+    inst := newInstance("test", defaultFactory, o)
 
     const (
         concurrent = 10
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index b881b86a92aaa..706a141ff1b6e 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -54,11 +54,10 @@ func init() {
 type stream struct {
     // Newest chunk at chunks[n-1].
     // Not thread-safe; assume accesses to this are locked by caller.
-    chunks          []chunkDesc
-    fp              model.Fingerprint // possibly remapped fingerprint, used in the streams map
-    labels          labels.Labels
-    blockSize       int
-    targetChunkSize int // Compressed bytes
+    chunks  []chunkDesc
+    fp      model.Fingerprint // possibly remapped fingerprint, used in the streams map
+    labels  labels.Labels
+    factory func() chunkenc.Chunk
 
     tailers   map[uint32]*tailer
     tailerMtx sync.RWMutex
@@ -77,13 +76,12 @@ type entryWithError struct {
     e error
 }
 
-func newStream(fp model.Fingerprint, labels labels.Labels, blockSize, targetChunkSize int) *stream {
+func newStream(fp model.Fingerprint, labels labels.Labels, factory func() chunkenc.Chunk) *stream {
     return &stream{
-        fp:              fp,
-        labels:          labels,
-        blockSize:       blockSize,
-        targetChunkSize: targetChunkSize,
-        tailers:         map[uint32]*tailer{},
+        fp:      fp,
+        labels:  labels,
+        factory: factory,
+        tailers: map[uint32]*tailer{},
     }
 }
 
@@ -105,7 +103,7 @@ func (s *stream) consumeChunk(_ context.Context, chunk *logproto.Chunk) error {
 func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
     if len(s.chunks) == 0 {
         s.chunks = append(s.chunks, chunkDesc{
-            chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize),
+            chunk: s.factory(),
         })
         chunksCreatedTotal.Inc()
     }
@@ -132,7 +130,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
         chunksCreatedTotal.Inc()
         s.chunks = append(s.chunks, chunkDesc{
-            chunk: chunkenc.NewMemChunkSize(chunkenc.EncGZIP, s.blockSize, s.targetChunkSize),
+            chunk: s.factory(),
         })
         chunk = &s.chunks[len(s.chunks)-1]
     }
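With chunk creation behind a single factory, the helpers added in `util_test.go` above give an easy way to compare the supported encodings on realistic log data. A minimal sketch of such a comparison, assuming it lives in package `chunkenc` next to those helpers; `EncLZ4` and `EncSnappy` are assumed constant names for the new encodings (only `EncGZIP` appears verbatim in this diff), while `NewMemChunk`, `fillChunk`, and `Chunk.Bytes` come from the changed code.

```go
package chunkenc

import "fmt"

// compareEncodings is a sketch only: fill one chunk per encoding and
// print how many uncompressed bytes went in versus serialized bytes out.
func compareEncodings() {
	for _, enc := range []Encoding{EncGZIP, EncLZ4, EncSnappy} { // EncLZ4/EncSnappy assumed
		c := NewMemChunk(enc)
		in := fillChunk(c) // uncompressed bytes appended before the chunk filled up

		out, err := c.Bytes() // serialized (compressed) chunk
		if err != nil {
			panic(err)
		}
		fmt.Printf("%v: %d bytes in, %d bytes out (%.1fx)\n",
			enc, in, len(out), float64(in)/float64(len(out)))
	}
}
```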
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 74487567632c8..1eb75ef68e448 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -1,4 +1,5 @@
 Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
deleted file mode 100644
index 8298d309aefaa..0000000000000
--- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-package flate
-
-import (
-    "github.com/klauspost/cpuid"
-)
-
-// crc32sse returns a hash for the first 4 bytes of the slice
-// len(a) must be >= 4.
-//go:noescape
-func crc32sse(a []byte) uint32
-
-// crc32sseAll calculates hashes for each 4-byte set in a.
-// dst must be at least len(a) - 4 in size.
-// The size is not checked by the assembly.
-//go:noescape
-func crc32sseAll(a []byte, dst []uint32)
-
-// matchLenSSE4 returns the number of matching bytes in a and b
-// up to length 'max'. Both slices must be at least 'max'
-// bytes in size.
-//
-// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
-//
-//go:noescape
-func matchLenSSE4(a, b []byte, max int) int
-
-// histogram accumulates a histogram of b in h.
-// h must be at least 256 entries in length,
-// and must be cleared before calling this function.
-//go:noescape
-func histogram(b []byte, h []int32)
-
-// Detect SSE 4.2 feature.
-func init() {
-    useSSE42 = cpuid.CPU.SSE42()
-}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
deleted file mode 100644
index a7994372702b7..0000000000000
--- a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
+++ /dev/null
@@ -1,214 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
- -// func crc32sse(a []byte) uint32 -TEXT ·crc32sse(SB), 4, $0 - MOVQ a+0(FP), R10 - XORQ BX, BX - - // CRC32 dword (R10), EBX - BYTE $0xF2; BYTE $0x41; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0x1a - - MOVL BX, ret+24(FP) - RET - -// func crc32sseAll(a []byte, dst []uint32) -TEXT ·crc32sseAll(SB), 4, $0 - MOVQ a+0(FP), R8 // R8: src - MOVQ a_len+8(FP), R10 // input length - MOVQ dst+24(FP), R9 // R9: dst - SUBQ $4, R10 - JS end - JZ one_crc - MOVQ R10, R13 - SHRQ $2, R10 // len/4 - ANDQ $3, R13 // len&3 - XORQ BX, BX - ADDQ $1, R13 - TESTQ R10, R10 - JZ rem_loop - -crc_loop: - MOVQ (R8), R11 - XORQ BX, BX - XORQ DX, DX - XORQ DI, DI - MOVQ R11, R12 - SHRQ $8, R11 - MOVQ R12, AX - MOVQ R11, CX - SHRQ $16, R12 - SHRQ $16, R11 - MOVQ R12, SI - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - - // CRC32 ECX, EDX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd1 - - // CRC32 ESI, EDI - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xfe - MOVL BX, (R9) - MOVL DX, 4(R9) - MOVL DI, 8(R9) - - XORQ BX, BX - MOVL R11, AX - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - MOVL BX, 12(R9) - - ADDQ $16, R9 - ADDQ $4, R8 - XORQ BX, BX - SUBQ $1, R10 - JNZ crc_loop - -rem_loop: - MOVL (R8), AX - - // CRC32 EAX, EBX - BYTE $0xF2; BYTE $0x0f - BYTE $0x38; BYTE $0xf1; BYTE $0xd8 - - MOVL BX, (R9) - ADDQ $4, R9 - ADDQ $1, R8 - XORQ BX, BX - SUBQ $1, R13 - JNZ rem_loop - -end: - RET - -one_crc: - MOVQ $1, R13 - XORQ BX, BX - JMP rem_loop - -// func matchLenSSE4(a, b []byte, max int) int -TEXT ·matchLenSSE4(SB), 4, $0 - MOVQ a_base+0(FP), SI - MOVQ b_base+24(FP), DI - MOVQ DI, DX - MOVQ max+48(FP), CX - -cmp8: - // As long as we are 8 or more bytes before the end of max, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ CX, $8 - JLT cmp1 - MOVQ (SI), AX - MOVQ (DI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, SI - ADDQ $8, DI - SUBQ $8, CX - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, DI - - // Subtract off &b[0] to convert from &b[ret] to ret, and return. - SUBQ DX, DI - MOVQ DI, ret+56(FP) - RET - -cmp1: - // In the slices' tail, compare 1 byte at a time. - CMPQ CX, $0 - JEQ matchLenEnd - MOVB (SI), AX - MOVB (DI), BX - CMPB AX, BX - JNE matchLenEnd - ADDQ $1, SI - ADDQ $1, DI - SUBQ $1, CX - JMP cmp1 - -matchLenEnd: - // Subtract off &b[0] to convert from &b[ret] to ret, and return. 
- SUBQ DX, DI - MOVQ DI, ret+56(FP) - RET - -// func histogram(b []byte, h []int32) -TEXT ·histogram(SB), 4, $0 - MOVQ b+0(FP), SI // SI: &b - MOVQ b_len+8(FP), R9 // R9: len(b) - MOVQ h+24(FP), DI // DI: Histogram - MOVQ R9, R8 - SHRQ $3, R8 - JZ hist1 - XORQ R11, R11 - -loop_hist8: - MOVQ (SI), R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - MOVB R10, R11 - INCL (DI)(R11*4) - SHRQ $8, R10 - - INCL (DI)(R10*4) - - ADDQ $8, SI - DECQ R8 - JNZ loop_hist8 - -hist1: - ANDQ $7, R9 - JZ end_hist - XORQ R10, R10 - -loop_hist1: - MOVB (SI), R10 - INCL (DI)(R10*4) - INCQ SI - DECQ R9 - JNZ loop_hist1 - -end_hist: - RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go deleted file mode 100644 index dcf43bd50a80e..0000000000000 --- a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go +++ /dev/null @@ -1,35 +0,0 @@ -//+build !amd64 noasm appengine gccgo - -// Copyright 2015, Klaus Post, see LICENSE for details. - -package flate - -func init() { - useSSE42 = false -} - -// crc32sse should never be called. -func crc32sse(a []byte) uint32 { - panic("no assembler") -} - -// crc32sseAll should never be called. -func crc32sseAll(a []byte, dst []uint32) { - panic("no assembler") -} - -// matchLenSSE4 should never be called. -func matchLenSSE4(a, b []byte, max int) int { - panic("no assembler") - return 0 -} - -// histogram accumulates a histogram of b in h. -// -// len(h) must be >= 256, and h's elements must be all zeroes. -func histogram(b []byte, h []int32) { - h = h[:256] - for _, t := range b { - h[t]++ - } -} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 6287951204e8d..20c94f5968439 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -50,8 +50,6 @@ const ( skipNever = math.MaxInt32 ) -var useSSE42 bool - type compressionLevel struct { good, lazy, nice, chain, fastSkipHashing, level int } @@ -97,9 +95,8 @@ type advancedState struct { hashOffset int // input window: unprocessed data is window[index:windowEnd] - index int - bulkHasher func([]byte, []uint32) - hashMatch [maxMatchLength + minMatchLength]uint32 + index int + hashMatch [maxMatchLength + minMatchLength]uint32 } type compressor struct { @@ -120,7 +117,7 @@ type compressor struct { // queued output tokens tokens tokens - snap fastEnc + fast fastEnc state *advancedState } @@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int { return n } -func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { if index > 0 || eof { var window []byte if d.blockStart <= index { window = d.window[d.blockStart:index] } d.blockStart = index - d.w.writeBlock(tok.tokens[:tok.n], eof, window) + d.w.writeBlock(tok, eof, window) return d.w.err } return nil @@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { // writeBlockSkip writes the current block and uses the number of tokens // to determine if the block should be stored on no matches, or // only huffman encoded. 
-func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { if index > 0 || eof { if d.blockStart <= index { window := d.window[d.blockStart:index] // If we removed less than a 64th of all literals // we huffman compress the block. if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window) + d.w.writeBlockHuff(eof, window, d.sync) } else { // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) } } else { - d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + d.w.writeBlock(tok, eof, nil) } d.blockStart = index return d.w.err @@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only mode, // use constant or Snappy compression. - switch d.compressionLevel.level { - case 0, 1, 2: + if d.level == 0 { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() return } s := d.state @@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) { } dst := s.hashMatch[:dstSize] - s.bulkHasher(tocheck, dst) + bulkHash4(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex @@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { - n := matchLen(win[i:], wPos, minMatchLook) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i == minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex || i < 0 { - break - } - } - return -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = prevLength - if length >= d.good { - tries >>= 2 - } - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLenSSE4(win[i:], wPos, minMatchLook) + n := matchLen(win[i:i+minMatchLook], wPos) if n > length && (n > minMatchLength || pos-i <= 4096) { length = n @@ -372,42 +322,27 @@ func (d *compressor) writeStoredBlock(buf []byte) error { return d.w.err } -const hashmul = 0x1e35a7bd - // hash4 returns a hash representation of the first 4 bytes // of the supplied slice. // The caller must ensure that len(b) >= 4. 
func hash4(b []byte) uint32 { - return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) + b = b[:4] + return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) } // bulkHash4 will compute hashes using the same // algorithm as hash4 func bulkHash4(b []byte, dst []uint32) { - if len(b) < minMatchLength { + if len(b) < 4 { return } hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 - dst[0] = (hb * hashmul) >> (32 - hashBits) - end := len(b) - minMatchLength + 1 + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 for i := 1; i < end; i++ { hb = (hb << 8) | uint32(b[i+3]) - dst[i] = (hb * hashmul) >> (32 - hashBits) - } -} - -// matchLen returns the number of matching bytes in a and b -// up to length 'max'. Both slices must be at least 'max' -// bytes in size. -func matchLen(a, b []byte, max int) int { - a = a[:max] - b = b[:len(a)] - for i, av := range a { - if b[i] != av { - return i - } + dst[i] = hash4u(hb, hashBits) } - return max } func (d *compressor) initDeflate() { @@ -424,149 +359,6 @@ func (d *compressor) initDeflate() { s.offset = 0 s.hash = 0 s.chainHead = -1 - s.bulkHasher = bulkHash4 - if useSSE42 { - s.bulkHasher = crc32sseAll - } -} - -// Assumes that d.fastSkipHashing != skipNever, -// otherwise use deflateLazy -func (d *compressor) deflate() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - if d.tokens.n > 0 { - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - ch := s.hashHead[s.hash&hashMask] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) - } - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if s.length >= minMatchLength { - s.ii = 0 - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3 - d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) - d.tokens.n++ - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. 
- if s.length <= d.fastSkipHashing { - var newIndex int - newIndex = s.index + s.length - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - s.index = newIndex - } else { - // For matches this long, we don't bother inserting each individual - // item into the table. - s.index += s.length - if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) - } - } - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - s.ii++ - end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1 - if end > d.windowEnd { - end = d.windowEnd - } - for i := s.index; i < end; i++ { - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } - s.index = end - } - } } // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, @@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() { // Flush current output block if any. if d.byteAvailable { // There is still one pending token that needs to be flushed - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) d.byteAvailable = false } if d.tokens.n > 0 { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } return } @@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() { if prevLength >= minMatchLength && s.length <= prevLength { // There was a match at the previous step, and the current match is // not better. Output the previous match. - d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - d.tokens.n++ + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. If there is not enough @@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() { s.length = minMatchLength - 1 if d.tokens.n == maxFlateBlockTokens { // The block includes the current character - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } } else { // Reset, if we got a match this run. @@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() { // We have a byte waiting. Emit it. 
if d.byteAvailable { s.ii++ - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } s.index++ @@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() { break } - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 + d.tokens.Reset() } s.index++ } // Flush last byte - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ + d.tokens.AddLiteral(d.window[s.index-1]) d.byteAvailable = false // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { return } - d.tokens.n = 0 - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -// Assumes that d.fastSkipHashing != skipNever, -// otherwise use deflateLazySSE -func (d *compressor) deflateSSE() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - if d.tokens.n > 0 { - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - ch := s.hashHead[s.hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash] = uint32(s.index + s.hashOffset) - } - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 { - if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if s.length >= minMatchLength { - s.ii = 0 - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3 - d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize)) - d.tokens.n++ - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. 
If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - if s.length <= d.fastSkipHashing { - var newIndex int - newIndex = s.index + s.length - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - - crc32sseAll(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - s.index = newIndex - } else { - // For matches this long, we don't bother inserting each individual - // item into the table. - s.index += s.length - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - } - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - s.ii++ - end := s.index + int(s.ii>>5) + 1 - if end > d.windowEnd { - end = d.windowEnd - } - for i := s.index; i < end; i++ { - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } - s.index = end - } - } -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazySSE() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = false - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - if s.index < s.maxInsertIndex { - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - } - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. 
- if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask - ch := s.hashHead[s.hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[s.hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - if prevLength >= minMatchLength && s.length <= prevLength { - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - d.tokens.n++ - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - var newIndex int - newIndex = s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - crc32sseAll(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - s.hash = newH - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. 
- if s.ii > 31 { - n := int(s.ii >> 6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 - } - s.index++ - } - // Flush last byte - d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1])) - d.tokens.n++ - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.n = 0 + d.tokens.Reset() } } } else { @@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() { if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { return } - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) d.err = d.w.err d.windowEnd = 0 } -// storeHuff will compress and store the currently added data, +// storeFast will compress and store the currently added data, // if enough has been accumulated or we at the end of the stream. // Any error that occurred will be in d.err -func (d *compressor) storeSnappy() { +func (d *compressor) storeFast() { // We only compress if we have maxStoreBlockSize. - if d.windowEnd < maxStoreBlockSize { + if d.windowEnd < len(d.window) { if !d.sync { return } @@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() { } if d.windowEnd <= 32 { d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.tokens.n = 0 - d.windowEnd = 0 } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) d.err = d.w.err } - d.tokens.n = 0 + d.tokens.Reset() d.windowEnd = 0 - d.snap.Reset() + d.fast.Reset() return } } - d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) // If we made zero matches, store the block as is. - if int(d.tokens.n) == d.windowEnd { + if d.tokens.n == 0 { d.err = d.writeStoredBlock(d.window[:d.windowEnd]) // If we removed less than 1/16th, huffman compress the block. 
} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) d.err = d.w.err } else { - d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) d.err = d.w.err } - d.tokens.n = 0 + d.tokens.Reset() d.windowEnd = 0 } @@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) { d.fill = (*compressor).fillBlock d.step = (*compressor).store case level == ConstantCompression: + d.w.logReusePenalty = uint(4) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeHuff - case level >= 1 && level <= 4: - d.snap = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeSnappy case level == DefaultCompression: level = 5 fallthrough - case 5 <= level && level <= 9: + case level >= 1 && level <= 6: + d.w.logReusePenalty = uint(level + 1) + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logReusePenalty = uint(level) d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() d.fill = (*compressor).fillDeflate - if d.fastSkipHashing == skipNever { - if useSSE42 { - d.step = (*compressor).deflateLazySSE - } else { - d.step = (*compressor).deflateLazy - } - } else { - if useSSE42 { - d.step = (*compressor).deflateSSE - } else { - d.step = (*compressor).deflate - - } - } + d.step = (*compressor).deflateLazy default: return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) } @@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) { d.sync = false d.err = nil // We only need to reset a few things for Snappy. - if d.snap != nil { - d.snap.Reset() + if d.fast != nil { + d.fast.Reset() d.windowEnd = 0 - d.tokens.n = 0 + d.tokens.Reset() return } switch d.compressionLevel.chain { @@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) { s.hashOffset = 1 s.index, d.windowEnd = 0, 0 d.blockStart, d.byteAvailable = 0, false - d.tokens.n = 0 + d.tokens.Reset() s.length = minMatchLength - 1 s.offset = 0 s.hash = 0 diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 0000000000000..b0a470f92e0eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,257 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
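+//
+// This file contains the shared plumbing for the table-based "fast" encoders
+// (compression levels 1 through 6): the fastEnc interface, the hashing
+// helpers, the shared history buffer (fastGen), and the match-length
+// primitives they build on.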
+ +package flate + +import ( + "fmt" + "math/bits" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 16 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 18 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxMatchOffset * 10 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load32(b []byte, i int) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +type tableEntry struct { + val uint32 + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) 
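+	// s is the offset at which src begins inside e.hist; the history may have
+	// been shifted down above, in which case e.cur was advanced to compensate.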
+	return s
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> ((32 - h) & 31)
+}
+
+type tableEntryPrev struct {
+	Cur  tableEntry
+	Prev tableEntry
+}
+
+// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4x64(u uint64, h uint8) uint32 {
+	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
+}
+
+// hash8 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash8(u uint64, h uint8) uint32 {
+	return uint32((u * prime8bytes) >> ((64 - h) & 63))
+}
+
+// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash6(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+	if cap(e.hist) < int(maxMatchOffset*8) {
+		l := maxMatchOffset * 8
+		// Make it at least 1MB.
+		if l < 1<<20 {
+			l = 1 << 20
+		}
+		e.hist = make([]byte, 0, l)
+	}
+	// We offset the current position so everything will be out of reach.
+	e.cur += maxMatchOffset + int32(len(e.hist))
+	e.hist = e.hist[:0]
+}
+
+// matchLen returns the number of matching bytes between a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) int {
+	b = b[:len(a)]
+	var checked int
+	if len(a) > 4 {
+		// Try 4 bytes first
+		if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
+			return bits.TrailingZeros32(diff) >> 3
+		}
+		// Switch to 8 byte matching.
+		checked = 4
+		a = a[4:]
+		b = b[4:]
+		for len(a) >= 8 {
+			b = b[:len(a)]
+			if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
+				return checked + (bits.TrailingZeros64(diff) >> 3)
+			}
+			checked += 8
+			a = a[8:]
+			b = b[8:]
+		}
+	}
+	b = b[:len(a)]
+	for i := range a {
+		if a[i] != b[i] {
+			return int(i) + checked
+		}
+	}
+	return len(a) + checked
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index f46c654189fc6..dd74ffb87232b 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -85,26 +85,48 @@ type huffmanBitWriter struct {
 	// Data waiting to be written is bytes[0:nbytes]
 	// and then the low nbits of bits.
 	bits            uint64
-	nbits           uint
-	bytes           [256]byte
-	codegenFreq     [codegenCodeCount]int32
+	nbits           uint16
 	nbytes          uint8
-	literalFreq     []int32
-	offsetFreq      []int32
-	codegen         []uint8
 	literalEncoding *huffmanEncoder
 	offsetEncoding  *huffmanEncoder
 	codegenEncoding *huffmanEncoder
 	err             error
+	lastHeader      int
+	// Penalty added to the estimated size of a fresh table;
+	// at 0 a reused block can be up to 2x the optimal size.
+	logReusePenalty uint
+	lastHuffMan     bool
+	bytes           [256]byte
+	literalFreq     [lengthCodesStart + 32]uint16
+	offsetFreq      [32]uint16
+	codegenFreq     [codegenCodeCount]uint16
+
+	// codegen must have an extra space for the final symbol.
+	codegen [literalCount + offsetCodeCount + 1]uint8
 }

+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing Huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written first.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a 'fresh' table by calculating the
+// optimal size and adding a penalty in 'logReusePenalty'.
+// The penalty is added because a real Huffman table is never optimal, and because
+// generating a new table is slower for both compression and decompression.
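+//
+// Worked example (hypothetical numbers): with lastHeader = 120 bits and tokens
+// that estimate to 1000 bits, a fresh table costs newSize = 1120 bits before
+// the penalty. With logReusePenalty = 4 the penalty is 1120>>4 = 70 bits, so
+// the previous table is reused unless encoding the block with it would take
+// more than 1190 bits.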
+ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { return &huffmanBitWriter{ writer: w, - literalFreq: make([]int32, lengthCodesStart+32), - offsetFreq: make([]int32, 32), - codegen: make([]uint8, maxNumLit+offsetCodeCount+1), - literalEncoding: newHuffmanEncoder(maxNumLit), + literalEncoding: newHuffmanEncoder(literalCount), codegenEncoding: newHuffmanEncoder(codegenCodeCount), offsetEncoding: newHuffmanEncoder(offsetCodeCount), } @@ -113,7 +135,41 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { func (w *huffmanBitWriter) reset(writer io.Writer) { w.writer = writer w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.bytes = [256]byte{} + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { + offsets, lits = true, true + a := t.offHist[:offsetCodeCount] + b := w.offsetFreq[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + offsets = false + break + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalFreq[256:literalCount] + b = b[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + lits = false + break + } + } + if lits { + a = t.litHist[:] + b = w.literalFreq[:len(a)] + for i := range a { + if b[i] == 0 && a[i] != 0 { + lits = false + break + } + } + } + return } func (w *huffmanBitWriter) flush() { @@ -144,30 +200,11 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint) { - w.bits |= uint64(b) << w.nbits +func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { + w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - w.nbytes = n + w.writeOutBits() } } @@ -213,7 +250,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE // a copy of the frequencies, and as the place where we put the result. // This is fine because the output is always shorter than the input used // so far. - codegen := w.codegen // cache + codegen := w.codegen[:] // cache // Copy the concatenated code sizes to codegen. Put a marker at the end. cgnl := codegen[:numLiterals] for i := range cgnl { @@ -292,30 +329,54 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE codegen[outIndex] = badCode } -// dynamicSize returns the size of dynamically encoded data in bits. 
-func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { numCodegens = len(w.codegenFreq) for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { numCodegens-- } - header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + return 3 + 5 + 5 + 4 + (3 * numCodegens) + w.codegenEncoding.bitLength(w.codegenFreq[:]) + int(w.codegenFreq[16])*2 + int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7 + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() size = header + - litEnc.bitLength(w.literalFreq) + - offEnc.bitLength(w.offsetFreq) + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + extraBits - return size, numCodegens } +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + // fixedSize returns the size of dynamically encoded data in bits. func (w *huffmanBitWriter) fixedSize(extraBits int) int { return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq) + - fixedOffsetEncoding.bitLength(w.offsetFreq) + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + extraBits } @@ -333,30 +394,36 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { } func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. w.bits |= uint64(c.code) << w.nbits - w.nbits += uint(c.len) + w.nbits += c.len if w.nbits >= 48 { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + if w.err != nil { n = 0 + return } - w.nbytes = n + w.write(w.bytes[:n]) + n = 0 } + w.nbytes = n } // Write the header of a dynamic Huffman block to the output stream. 
@@ -395,15 +462,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n case 16: w.writeBits(int32(w.codegen[i]), 2) i++ - break case 17: w.writeBits(int32(w.codegen[i]), 3) i++ - break case 18: w.writeBits(int32(w.codegen[i]), 7) i++ - break } } } @@ -412,6 +476,11 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { if w.err != nil { return } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } var flag int32 if isEof { flag = 1 @@ -426,6 +495,12 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { if w.err != nil { return } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + // Indicate that we are a fixed Huffman block var value int32 = 2 if isEof { @@ -439,29 +514,23 @@ func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { // is larger than the original bytes, the data will be written as a // stored block. // If the input is nil, the tokens will always be Huffman encoded. -func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { if w.err != nil { return } - tokens = append(tokens, endBlockMarker) - numLiterals, numOffsets := w.indexTokens(tokens) - + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate(tokens) var extraBits int storedSize, storable := w.storedSize(input) if storable { - // We only bother calculating the costs of the extra bits required by - // the length of offset fields (which will be the same for both fixed - // and dynamic encoding), if we need to compare those two encodings - // against stored encoding. - for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { - // First eight length codes have extra size = 0. - extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) - } - for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { - // First four offset codes have extra size = 0. - extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode&63]) - } + extraBits = w.extraBitSize() } // Figure out smallest code. @@ -500,7 +569,7 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { } // Write the tokens. - w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) } // writeBlockDynamic encodes a block using a dynamic Huffman table. @@ -508,72 +577,103 @@ func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { // histogram distribution. // If input is supplied and the compression savings are below 1/16th of the // input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { if w.err != nil { return } - tokens = append(tokens, endBlockMarker) - numLiterals, numOffsets := w.indexTokens(tokens) + sync = sync || eof + if sync { + tokens.AddEOB() + } - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. 
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + // We cannot reuse pure huffman table. + if w.lastHuffMan && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + if !sync { + tokens.Fill() + } + numLiterals, numOffsets := w.indexTokens(tokens, !sync) - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return + var size int + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table + newSize := w.lastHeader + tokens.EstimatedBits() + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. + newSize += newSize >> (w.logReusePenalty & 31) + extra := w.extraBitSize() + reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra) + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + // Check if we get a reasonable size decrease. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + w.lastHeader = 0 + return + } } - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + // We want a new block/table + if w.lastHeader == 0 { + w.generate(tokens) + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + var numCodegens int + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + w.lastHeader = 0 + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHeader, _ = w.headerSize() + w.lastHuffMan = false + } + if sync { + w.lastHeader = 0 + } // Write the tokens. - w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) } // indexTokens indexes a slice of tokens, and updates // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { - for i := range w.literalFreq { - w.literalFreq[i] = 0 - } - for i := range w.offsetFreq { - w.offsetFreq[i] = 0 - } +func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + copy(w.literalFreq[:], t.litHist[:]) + copy(w.literalFreq[256:], t.extraHist[:]) + copy(w.offsetFreq[:], t.offHist[:offsetCodeCount]) - if len(tokens) == 0 { + if t.n == 0 { return } - - // Only last token should be endBlockMarker. 
- if tokens[len(tokens)-1] == endBlockMarker { - w.literalFreq[endBlockMarker]++ - tokens = tokens[:len(tokens)-1] + if filled { + return maxNumLit, maxNumDist } - - // Create slices up to the next power of two to avoid bounds checks. - lits := w.literalFreq[:256] - offs := w.offsetFreq[:32] - lengths := w.literalFreq[lengthCodesStart:] - lengths = lengths[:32] - for _, t := range tokens { - if t < endBlockMarker { - lits[t.literal()]++ - continue - } - length := t.length() - offset := t.offset() - lengths[lengthCode(length)&31]++ - offs[offsetCode(offset)&31]++ - } - // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { @@ -590,11 +690,14 @@ func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets w.offsetFreq[0] = 1 numOffsets = 1 } - w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) return } +func (w *huffmanBitWriter) generate(t *tokens) { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + // writeTokens writes a slice of tokens to the output. // codes for literal and offset encoding must be supplied. func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { @@ -626,8 +729,19 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() lengthCode := lengthCode(length) - w.writeCode(lengths[lengthCode&31]) - extraLengthBits := uint(lengthExtraBits[lengthCode&31]) + if false { + w.writeCode(lengths[lengthCode&31]) + } else { + // inlined + c := lengths[lengthCode&31] + w.bits |= uint64(c.code) << (w.nbits & 63) + w.nbits += c.len + if w.nbits >= 48 { + w.writeOutBits() + } + } + + extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) if extraLengthBits > 0 { extraLength := int32(length - lengthBase[lengthCode&31]) w.writeBits(extraLength, extraLengthBits) @@ -635,8 +749,18 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the offset offset := t.offset() offsetCode := offsetCode(offset) - w.writeCode(offs[offsetCode&31]) - extraOffsetBits := uint(offsetExtraBits[offsetCode&63]) + if false { + w.writeCode(offs[offsetCode&31]) + } else { + // inlined + c := offs[offsetCode&31] + w.bits |= uint64(c.code) << (w.nbits & 63) + w.nbits += c.len + if w.nbits >= 48 { + w.writeOutBits() + } + } + extraOffsetBits := uint16(offsetExtraBits[offsetCode&63]) if extraOffsetBits > 0 { extraOffset := int32(offset - offsetBase[offsetCode&63]) w.writeBits(extraOffset, extraOffsetBits) @@ -661,75 +785,93 @@ func init() { // writeBlockHuff encodes a block of bytes as either // Huffman encoded literals or uncompressed bytes if the // results only gains very little from compression. -func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if w.err != nil { return } // Clear histogram - for i := range w.literalFreq { + for i := range w.literalFreq[:] { w.literalFreq[i] = 0 } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } // Add everything as literals - histogram(input, w.literalFreq) + estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15 - w.literalFreq[endBlockMarker] = 1 + // Store bytes, if we don't get a reasonable improvement. 
+ ssize, storable := w.storedSize(input) + if storable && ssize < (estBits+estBits>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 + if w.lastHeader > 0 { + size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader) + estBits += estBits >> (w.logReusePenalty) - w.literalEncoding.generate(w.literalFreq[:maxNumLit], 15) + if estBits < size { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + } - // Figure out smallest code. - // Always use dynamic Huffman or Store - var numCodegens int + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + if w.lastHeader == 0 { + w.literalFreq[endBlockMarker] = 1 + w.literalEncoding.generate(w.literalFreq[:numLiterals], 15) - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() } - // Huffman. 
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) encoding := w.literalEncoding.codes[:257] - n := w.nbytes for _, t := range input { // Bitwriting inlined, ~30% speedup c := encoding[t] - w.bits |= uint64(c.code) << w.nbits - w.nbits += uint(c.len) - if w.nbits < 48 { - continue - } - // Store 6 bytes - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - w.bytes[n] = byte(bits) - w.bytes[n+1] = byte(bits >> 8) - w.bytes[n+2] = byte(bits >> 16) - w.bytes[n+3] = byte(bits >> 24) - w.bytes[n+4] = byte(bits >> 32) - w.bytes[n+5] = byte(bits >> 40) - n += 6 - if n < bufferFlushSize { - continue - } - w.write(w.bytes[:n]) - if w.err != nil { - return // Return early in the event of write failures + w.bits |= uint64(c.code) << ((w.nbits) & 63) + w.nbits += c.len + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n } - n = 0 } - w.nbytes = n - w.writeCode(encoding[endBlockMarker]) + if eof || sync { + w.writeCode(encoding[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index f65f793361480..1810c6898d0b6 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -10,6 +10,12 @@ import ( "sort" ) +const ( + maxBitsLimit = 16 + // number of valid literals + literalCount = 286 +) + // hcode is a huffman code with a bit code and bit length. type hcode struct { code, len uint16 @@ -25,7 +31,7 @@ type huffmanEncoder struct { type literalNode struct { literal uint16 - freq int32 + freq uint16 } // A levelInfo describes the state of the constructed tree for a given depth. @@ -54,7 +60,11 @@ func (h *hcode) set(code uint16, length uint16) { h.code = code } -func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } +func reverseBits(number uint16, bitLength byte) uint16 { + return bits.Reverse16(number << ((16 - bitLength) & 15)) +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } func newHuffmanEncoder(size int) *huffmanEncoder { // Make capacity to next power of two. @@ -64,10 +74,10 @@ func newHuffmanEncoder(size int) *huffmanEncoder { // Generates a HuffmanCode corresponding to the fixed literal table func generateFixedLiteralEncoding() *huffmanEncoder { - h := newHuffmanEncoder(maxNumLit) + h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < maxNumLit; ch++ { + for ch = 0; ch < literalCount; ch++ { var bits uint16 var size uint16 switch { @@ -75,17 +85,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder { // size 8, 000110000 .. 10111111 bits = ch + 48 size = 8 - break case ch < 256: // size 9, 110010000 .. 111111111 bits = ch + 400 - 144 size = 9 - break case ch < 280: // size 7, 0000000 .. 0010111 bits = ch - 256 size = 7 - break default: // size 8, 11000000 .. 
11000111 bits = ch + 192 - 280 @@ -108,7 +115,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder { var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() -func (h *huffmanEncoder) bitLength(freq []int32) int { +func (h *huffmanEncoder) bitLength(freq []uint16) int { var total int for i, f := range freq { if f != 0 { @@ -118,8 +125,6 @@ func (h *huffmanEncoder) bitLength(freq []int32) int { return total } -const maxBitsLimit = 16 - // Return the number of literals assigned to each bit size in the Huffman encoding // // This method is only called when list.length >= 3 @@ -163,9 +168,9 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: list[1].freq, - nextCharFreq: list[2].freq, - nextPairFreq: list[0].freq + list[1].freq, + lastFreq: int32(list[1].freq), + nextCharFreq: int32(list[2].freq), + nextPairFreq: int32(list[0].freq) + int32(list[1].freq), } leafCounts[level][level] = 2 if level == 1 { @@ -197,7 +202,12 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { l.lastFreq = l.nextCharFreq // Lower leafCounts are the same of the previous node. leafCounts[level][level] = n - l.nextCharFreq = list[n].freq + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } } else { // The next item on this row is a pair from the previous row. // nextPairFreq isn't valid until we generate two @@ -273,12 +283,12 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // // freq An array of frequencies, in which frequency[i] gives the frequency of literal i. // maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { if h.freqcache == nil { // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. - // The largest of these is maxNumLit, so we allocate for that case. - h.freqcache = make([]literalNode, maxNumLit+1) + // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. + // The largest of these is literalCount, so we allocate for that case. + h.freqcache = make([]literalNode, literalCount+1) } list := h.freqcache[:len(freq)+1] // Number of non-zero literals @@ -345,3 +355,27 @@ func (s byFreq) Less(i, j int) bool { } func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// histogramSize accumulates a histogram of b in h. +// An estimated size in bits is returned. +// Unassigned values are assigned '1' in the histogram. +// len(h) must be >= 256, and h's elements must be all zeroes. 
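+// When fill is false, unseen values are left at zero and only the Shannon
+// estimate of the observed bytes is returned: a block of one repeated byte
+// estimates to ~0 bits, while uniformly random bytes estimate to ~8 bits each.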
+func histogramSize(b []byte, h []uint16, fill bool) int { + h = h[:256] + for _, t := range b { + h[t]++ + } + invTotal := 1.0 / float64(len(b)) + shannon := 0.0 + single := math.Ceil(-math.Log2(invTotal)) + for i, v := range h[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } else if fill { + shannon += single + h[i] = 1 + } + } + return int(shannon + 0.99) +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 800d0ce9e5452..6dc5b5d06e303 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -9,6 +9,7 @@ package flate import ( "bufio" + "fmt" "io" "math/bits" "strconv" @@ -24,6 +25,8 @@ const ( maxNumLit = 286 maxNumDist = 30 numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false ) // Initialize the fixedHuffmanDecoder only once upon first use. @@ -104,8 +107,8 @@ const ( type huffmanDecoder struct { min int // the minimum code length - chunks *[huffmanNumChunks]uint32 // chunks as described above - links [][]uint32 // overflow links + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links linkMask uint32 // mask the width of the link table } @@ -121,7 +124,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { const sanity = false if h.chunks == nil { - h.chunks = &[huffmanNumChunks]uint32{} + h.chunks = &[huffmanNumChunks]uint16{} } if h.min != 0 { *h = huffmanDecoder{chunks: h.chunks, links: h.links} @@ -169,6 +172,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. if code != 1<> 1 if cap(h.links) < huffmanNumChunks-link { - h.links = make([][]uint32, huffmanNumChunks-link) + h.links = make([][]uint16, huffmanNumChunks-link) } else { h.links = h.links[:huffmanNumChunks-link] } @@ -196,9 +202,9 @@ func (h *huffmanDecoder) init(lengths []int) bool { if sanity && h.chunks[reverse] != 0 { panic("impossible: overwriting existing chunk") } - h.chunks[reverse] = uint32(off<>= uint(16 - n) if n <= huffmanChunkBits { @@ -347,6 +353,9 @@ func (f *decompressor) nextBlock() { f.huffmanBlock() default: // 3 is reserved. 
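+		// Per RFC 1951, block type 3 is reserved and signals a corrupt stream.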
+ if debugDecode { + fmt.Println("reserved data block encountered") + } f.err = CorruptInputError(f.roffset) } } @@ -425,11 +434,17 @@ func (f *decompressor) readHuffman() error { } nlit := int(f.b&0x1F) + 257 if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } return CorruptInputError(f.roffset) } f.b >>= 5 ndist := int(f.b&0x1F) + 1 if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } return CorruptInputError(f.roffset) } f.b >>= 5 @@ -453,6 +468,9 @@ func (f *decompressor) readHuffman() error { f.codebits[codeOrder[i]] = 0 } if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } return CorruptInputError(f.roffset) } @@ -480,6 +498,9 @@ func (f *decompressor) readHuffman() error { rep = 3 nb = 2 if i == 0 { + if debugDecode { + fmt.Println("i==0") + } return CorruptInputError(f.roffset) } b = f.bits[i-1] @@ -494,6 +515,9 @@ func (f *decompressor) readHuffman() error { } for f.nb < nb { if err := f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits:", err) + } return err } } @@ -501,6 +525,9 @@ func (f *decompressor) readHuffman() error { f.b >>= nb f.nb -= nb if i+rep > n { + if debugDecode { + fmt.Println("i+rep > n", i, rep, n) + } return CorruptInputError(f.roffset) } for j := 0; j < rep; j++ { @@ -510,6 +537,9 @@ func (f *decompressor) readHuffman() error { } if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + if debugDecode { + fmt.Println("init2 failed") + } return CorruptInputError(f.roffset) } @@ -587,12 +617,18 @@ readLiteral: length = 258 n = 0 default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } f.err = CorruptInputError(f.roffset) return } if n > 0 { for f.nb < n { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits n>0:", err) + } f.err = err return } @@ -606,6 +642,9 @@ readLiteral: if f.hd == nil { for f.nb < 5 { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } f.err = err return } @@ -615,6 +654,9 @@ readLiteral: f.nb -= 5 } else { if dist, err = f.huffSym(f.hd); err != nil { + if debugDecode { + fmt.Println("huffsym:", err) + } f.err = err return } @@ -629,6 +671,9 @@ readLiteral: extra := (dist & 1) << nb for f.nb < nb { if err = f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits f.nb f.dict.histSize() { + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } f.err = CorruptInputError(f.roffset) return } @@ -688,6 +739,9 @@ func (f *decompressor) dataBlock() { n := int(f.buf[0]) | int(f.buf[1])<<8 nn := int(f.buf[2]) | int(f.buf[3])<<8 if uint16(nn) != uint16(^n) { + if debugDecode { + fmt.Println("uint16(nn) != uint16(^n)", nn, ^n) + } f.err = CorruptInputError(f.roffset) return } @@ -789,6 +843,9 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { if n == 0 { f.b = b f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } f.err = CorruptInputError(f.roffset) return 0, f.err } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go new file mode 100644 index 0000000000000..20de8f11f4f37 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -0,0 +1,174 @@ +package flate + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. 
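+// fastEncL1 embeds it and adds a single 64K-entry table of match candidates.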
+type fastEncL1 struct { + fastGen + table [tableSize]tableEntry +} + +// EncodeL1 uses a similar algorithm to level 1 +func (e *fastEncL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hash(cv) + candidate = e.table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(uint32(now)) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + // Save the match found + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + // Index first pair after match end. + if int(s+l+4) < len(src) { + cv := load3232(src, s) + e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hash(uint32(x)) + e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + x >>= 16 + currHash := hash(uint32(x)) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 0000000000000..7c824431e6477 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,199 @@ +package flate + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hash4u(cv, bTableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash4u(uint32(now), bTableBits) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} + break + } + + // Do one right away... 
+ cv = uint32(now) + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && cv == candidate.val { + break + } + cv = uint32(now) + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+4) < len(src) { + cv := load3232(src, s) + e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, int32(i)) + nextHash := hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} + // Skip one + x >>= 16 + nextHash = hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} + // Skip one + x >>= 16 + nextHash = hash4u(uint32(x), bTableBits) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hash4u(uint32(x), bTableBits) + prevHash2 := hash4u(uint32(x>>8), bTableBits) + e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} + e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} + currHash := hash4u(uint32(x>>16), bTableBits) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != candidate.val { + cv = uint32(x >> 24) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 0000000000000..4153d24c95fa4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,225 @@ +package flate + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3232(src, s) + for { + const skipLog = 6 + nextS := s + var candidate tableEntry + for { + nextHash := hash(cv) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load3232(src, nextS) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + + // Check both candidates + candidate = candidates.Cur + offset := s - (candidate.offset - e.cur) + if cv == candidate.val { + if offset > maxMatchOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + o2 := s - (candidates.Prev.offset - e.cur) + if cv != candidates.Prev.val || o2 > maxMatchOffset { + break + } + // Both match and are valid, pick longest. + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash := hash(cv) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash] + cv = uint32(x) + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 0000000000000..c689ac771b823 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,210 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hash4x64(cv, tableBits) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && lCandidate.val == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + if false { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. 
+ if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hash4u(t2.val, tableBits)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hash4u(t2.val, tableBits)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hash4x64(x, tableBits) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} + e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 0000000000000..14a2356126aad --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,276 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
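As the comment above says, stopping the match search at `sLimit` keeps the unconditional multi-byte loads in the hot loop in bounds. A small sketch of that bounds argument, assuming the same `12 - 1` margin the level 4-6 encoders use:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// load64 reads 8 little-endian bytes starting at i, in the spirit of
// the vendored load6432/load6416 helpers.
func load64(b []byte, i int) uint64 {
	return binary.LittleEndian.Uint64(b[i : i+8])
}

func main() {
	const inputMargin = 12 - 1 // margin used by the level 4-6 encoders
	src := []byte("0123456789abcdefghij")
	sLimit := len(src) - inputMargin

	// For any s < sLimit, s+8 <= (sLimit-1)+8 = len(src)-4, so the
	// 8-byte load below can never run past the end of src.
	for s := 0; s < sLimit; s++ {
		_ = load64(src, s)
	}
	fmt.Println("sLimit:", sLimit, "len:", len(src))
}
```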
+ sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hash4x64(cv, tableBits) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hash4x64(next, tableBits) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == lCandidate.Cur.val { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if lCandidate.Cur.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. 
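`matchlenLong` (used just below) extends a confirmed 4-byte match forward as far as the data allows, and the loop that follows also extends it backwards over already-emitted literals. A naive standalone stand-in for the forward extension, not the vendored optimized helper:

```go
package main

import "fmt"

// matchLen returns how many leading bytes of a and b are equal.
func matchLen(a, b []byte) int {
	if len(b) < len(a) {
		a = a[:len(b)]
	}
	for i := range a {
		if a[i] != b[i] {
			return i
		}
	}
	return len(a)
}

func main() {
	src := []byte("the quick fox, the quick dog")
	// A 4-byte match "the " was found at s=15 against t=0; skipping the
	// already-verified 4 bytes and extending gives the full match length.
	s, t := 15, 0
	l := matchLen(src[s+4:], src[t+4:]) + 4
	fmt.Println(l) // "the quick " matches: 10
}
```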
+ if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + e.table[hash4x64(cv, tableBits)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1, val: uint32(cv)} + e.table[hash4x64(cv, tableBits)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hash4u(t2.val, tableBits)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hash4x64(x, tableBits) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 0000000000000..cad0c7df7fc3c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,279 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hash4x64(cv, tableBits) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hash4x64(next, tableBits) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == lCandidate.Cur.val { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { + // Found a 4 match... 
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if lCandidate.Cur.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. + if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur, val: uint32(cv)} + t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hash4x64(cv, tableBits)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go deleted file mode 100644 index c1a02720d1a9b..0000000000000 --- a/vendor/github.com/klauspost/compress/flate/reverse_bits.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2009 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -var reverseByte = [256]byte{ - 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, - 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, - 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, - 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, - 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, - 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, - 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, - 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, - 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, - 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, - 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, - 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, - 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, - 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, - 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, - 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, - 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, - 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, - 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, - 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, - 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, - 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, - 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, - 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, - 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, - 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, - 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, - 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, - 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, - 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, - 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, - 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, -} - -func reverseUint16(v uint16) uint16 { - return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 -} - -func reverseBits(number uint16, bitLength byte) uint16 { - return reverseUint16(number << uint8(16-bitLength)) -} diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go deleted file mode 100644 index aebebd5248f91..0000000000000 --- a/vendor/github.com/klauspost/compress/flate/snappy.go +++ /dev/null @@ -1,900 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst *tokens, lit []byte) { - ol := int(dst.n) - for i, v := range lit { - dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) - } - dst.n += uint16(len(lit)) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. 
-func emitCopy(dst *tokens, offset, length int) { - dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) - dst.n++ -} - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &snappyL1{} - case 2: - return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} - case 3: - return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} - case 4: - return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 14 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. - baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset -) - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func hash(u uint32) uint32 { - return (u * 0x1e35a7bd) >> tableShift -} - -// snappyL1 encapsulates level 1 compression -type snappyL1 struct{} - -func (e *snappyL1) Reset() {} - -func (e *snappyL1) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 16 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Initialize the hash table. - // - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. - var table [tableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s)) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS)) - if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - emitLiteral(dst, src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of Snappy's: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - s1 := base + maxMatchLength - if s1 > len(src) { - s1 = len(src) - } - a := src[s:s1] - b := src[candidate+4:] - b = b[:len(a)] - l := len(a) - for i := range a { - if a[i] != b[i] { - l = i - break - } - } - s += l - - // matchToken is flate's equivalent of Snappy's emitCopy. - dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) - dst.n++ - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x >> 0)) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x >> 8)) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x >> 16)) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - emitLiteral(dst, src[nextEmit:]) - } -} - -type tableEntry struct { - val uint32 - offset int32 -} - -func load3232(b []byte, i int32) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6432(b []byte, i int32) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// snappyGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type snappyGen struct { - prev []byte - cur int32 -} - -// snappyGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type snappyL2 struct { - snappyGen - table [tableSize]tableEntry -} - -// EncodeL2 uses a similar algorithm to level 1, but is capable -// of matching across blocks giving better compression at a small slowdown. -func (e *snappyL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 8 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - if e.cur > 1<<30 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxStoreBlockSize - } - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - e.cur += maxStoreBlockSize - e.prev = e.prev[:0] - return - } - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := int32(0) - s := int32(0) - cv := load3232(src, s) - nextHash := hash(cv) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := int32(32) - - nextS := s - var candidate tableEntry - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash&tableMask] - now := load3232(src, nextS) - e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} - nextHash = hash(now) - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || cv != candidate.val { - // Out of range or not matched. - cv = now - continue - } - break - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. 
- emitLiteral(dst, src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - s += 4 - t := candidate.offset - e.cur + 4 - l := e.matchlen(s, t, src) - - // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) - dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) - dst.n++ - s += l - nextEmit = s - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+4) < len(src) && t > 0 { - cv := load3232(src, t) - e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} - } - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-1) - prevHash := hash(uint32(x)) - e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} - x >>= 8 - currHash := hash(uint32(x)) - candidate = e.table[currHash&tableMask] - e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != candidate.val { - cv = uint32(x >> 8) - nextHash = hash(cv) - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - emitLiteral(dst, src[nextEmit:]) - } - e.cur += int32(len(src)) - e.prev = e.prev[:len(src)] - copy(e.prev, src) -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// snappyL3 -type snappyL3 struct { - snappyGen - table [tableSize]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *snappyL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 8 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - if e.cur > 1<<30 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} - } - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - e.cur += maxStoreBlockSize - e.prev = e.prev[:0] - return - } - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- nextEmit := int32(0) - s := int32(0) - cv := load3232(src, s) - nextHash := hash(cv) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := int32(32) - - nextS := s - var candidate tableEntry - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash&tableMask] - now := load3232(src, nextS) - e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} - nextHash = hash(now) - - // Check both candidates - candidate = candidates.Cur - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - break - } - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - break - } - } - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - emitLiteral(dst, src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - s += 4 - t := candidate.offset - e.cur + 4 - l := e.matchlen(s, t, src) - - // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) - dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) - dst.n++ - s += l - nextEmit = s - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+4) < len(src) && t > 0 { - cv := load3232(src, t) - nextHash = hash(cv) - e.table[nextHash&tableMask] = tableEntryPrev{ - Prev: e.table[nextHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + t, val: cv}, - } - } - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. 
At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, - } - x >>= 8 - prevHash = hash(uint32(x)) - - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, - } - x >>= 8 - prevHash = hash(uint32(x)) - - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, - } - x >>= 8 - currHash := hash(uint32(x)) - candidates := e.table[currHash&tableMask] - cv = uint32(x) - e.table[currHash&tableMask] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur, val: cv}, - } - - // Check both candidates - candidate = candidates.Cur - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } - } - } - cv = uint32(x >> 8) - nextHash = hash(cv) - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - emitLiteral(dst, src[nextEmit:]) - } - e.cur += int32(len(src)) - e.prev = e.prev[:len(src)] - copy(e.prev, src) -} - -// snappyL4 -type snappyL4 struct { - snappyL3 -} - -// Encode uses a similar algorithm to level 3, -// but will check up to two candidates if first isn't long enough. -func (e *snappyL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 8 - 3 - minNonLiteralBlockSize = 1 + 1 + inputMargin - matchLenGood = 12 - ) - - // Protect against e.cur wraparound. - if e.cur > 1<<30 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} - } - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - e.cur += maxStoreBlockSize - e.prev = e.prev[:0] - return - } - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := int32(0) - s := int32(0) - cv := load3232(src, s) - nextHash := hash(cv) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. 
- // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := int32(32) - - nextS := s - var candidate tableEntry - var candidateAlt tableEntry - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash&tableMask] - now := load3232(src, nextS) - e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} - nextHash = hash(now) - - // Check both candidates - candidate = candidates.Cur - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset { - offset = s - (candidates.Prev.offset - e.cur) - if cv == candidates.Prev.val && offset < maxMatchOffset { - candidateAlt = candidates.Prev - } - break - } - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset { - break - } - } - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - emitLiteral(dst, src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - s += 4 - t := candidate.offset - e.cur + 4 - l := e.matchlen(s, t, src) - // Try alternative candidate if match length < matchLenGood. - if l < matchLenGood-4 && candidateAlt.offset != 0 { - t2 := candidateAlt.offset - e.cur + 4 - l2 := e.matchlen(s, t2, src) - if l2 > l { - l = l2 - t = t2 - } - } - // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) - dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) - dst.n++ - s += l - nextEmit = s - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+4) < len(src) && t > 0 { - cv := load3232(src, t) - nextHash = hash(cv) - e.table[nextHash&tableMask] = tableEntryPrev{ - Prev: e.table[nextHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + t, val: cv}, - } - } - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. 
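The load64-instead-of-three-load32s trick described in the comment above can be shown in isolation. This sketch uses the same multiplicative hash and 14 table bits as the surrounding code, with `binary.LittleEndian` standing in for the hand-rolled loaders:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// hash keeps the top tableBits (14) bits of the product, as in the
// vendored code.
func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> (32 - 14)
}

func main() {
	src := []byte("abcdefghijkl")
	s := 4

	// One 8-byte load at s-1 yields the 4-byte values at s-1, s and s+1
	// via shifts, replacing three separate 4-byte loads.
	x := binary.LittleEndian.Uint64(src[s-1:])
	fmt.Println(hash(uint32(x)), hash(uint32(x>>8)), hash(uint32(x>>16)))

	// The equivalent, slower three-load form:
	fmt.Println(
		hash(binary.LittleEndian.Uint32(src[s-1:])),
		hash(binary.LittleEndian.Uint32(src[s:])),
		hash(binary.LittleEndian.Uint32(src[s+1:])),
	)
}
```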
- x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, - } - x >>= 8 - prevHash = hash(uint32(x)) - - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, - } - x >>= 8 - prevHash = hash(uint32(x)) - - e.table[prevHash&tableMask] = tableEntryPrev{ - Prev: e.table[prevHash&tableMask].Cur, - Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, - } - x >>= 8 - currHash := hash(uint32(x)) - candidates := e.table[currHash&tableMask] - cv = uint32(x) - e.table[currHash&tableMask] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur, val: cv}, - } - - // Check both candidates - candidate = candidates.Cur - candidateAlt = tableEntry{} - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - offset = s - (candidates.Prev.offset - e.cur) - if cv == candidates.Prev.val && offset <= maxMatchOffset { - candidateAlt = candidates.Prev - } - continue - } - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if cv == candidate.val { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } - } - } - cv = uint32(x >> 8) - nextHash = hash(cv) - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - emitLiteral(dst, src[nextEmit:]) - } - e.cur += int32(len(src)) - e.prev = e.prev[:len(src)] - copy(e.prev, src) -} - -func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // If we are inside the current block - if t >= 0 { - b := src[t:] - a := src[s:s1] - b = b[:len(a)] - // Extend the match to be as long as possible. - for i := range a { - if a[i] != b[i] { - return int32(i) - } - } - return int32(len(a)) - } - - // We found a match in the previous block. - tp := int32(len(e.prev)) + t - if tp < 0 { - return 0 - } - - // Extend the match to be as long as possible. - a := src[s:s1] - b := e.prev[tp:] - if len(b) > len(a) { - b = b[:len(a)] - } - a = a[:len(b)] - for i := range b { - if a[i] != b[i] { - return int32(i) - } - } - - // If we reached our limit, we matched everything we are - // allowed to in the previous block and we return. - n := int32(len(b)) - if int(s+n) == s1 { - return n - } - - // Continue looking for more matches in the current block. - a = src[s+n : s1] - b = src[:len(a)] - for i := range a { - if a[i] != b[i] { - return int32(i) + n - } - } - return int32(len(a)) + n -} - -// Reset the encoding table. 
-func (e *snappyGen) Reset() { - e.prev = e.prev[:0] - e.cur += maxMatchOffset -} diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 0000000000000..a4705119757d7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,266 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows to compress directly to a Writer without retaining state. +// When returning everything will be flushed. +func StatelessDeflate(out io.Writer, in []byte, eof bool) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + for len(in) > 0 { + todo := in + if len(todo) > maxStatelessBlock { + todo = todo[:maxStatelessBlock] + } + in = in[len(todo):] + // Compress + statelessEnc(&dst, todo) + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(todo), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(todo) + } else if int(dst.n) > len(todo)-len(todo)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, todo, false) + } else { + bw.writeBlockDynamic(&dst, isEof, todo, false) + } + if bw.err != nil { + return bw.err + } + dst.Reset() + } + if !eof { + // Align. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + s := int16(1) + nextEmit := int16(0) + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + emitLiteral(dst, src[nextEmit:s]) + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index 141299b973803..b3df0d8941e12 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -4,6 +4,14 @@ package flate +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + const ( // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused // 8 bits: xlength = length - MIN_MATCH_LENGTH @@ -46,6 +54,36 @@ var lengthCodes = [256]uint8{ 27, 27, 27, 27, 27, 28, } +// lengthCodes1 is length codes, but starting at 1. +var lengthCodes1 = [256]uint8{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, + 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, + 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, + 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, + 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, + 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, + 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 29, +} + var offsetCodes = [256]uint32{ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, @@ -65,19 +103,236 @@ var offsetCodes = [256]uint32{ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, } +// offsetCodes14 are offsetCodes, but with 14 added. 
+var offsetCodes14 = [256]uint32{ + 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, + 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, +} + type token uint32 type tokens struct { - tokens [maxStoreBlockSize + 1]token - n uint16 // Must be able to contain maxStoreBlockSize + nLits int + extraHist [32]uint16 // codes 256->maxnumlit + offHist [32]uint16 // offset codes + litHist [256]uint16 // codes 0->255 + n uint16 // Must be able to contain maxStoreBlockSize + tokens [maxStoreBlockSize + 1]token +} + +func (t *tokens) Reset() { + if t.n == 0 { + return + } + t.n = 0 + t.nLits = 0 + for i := range t.litHist[:] { + t.litHist[i] = 0 + } + for i := range t.extraHist[:] { + t.extraHist[i] = 0 + } + for i := range t.offHist[:] { + t.offHist[i] = 0 + } +} + +func (t *tokens) Fill() { + if t.n == 0 { + return + } + for i, v := range t.litHist[:] { + if v == 0 { + t.litHist[i] = 1 + t.nLits++ + } + } + for i, v := range t.extraHist[:literalCount-256] { + if v == 0 { + t.nLits++ + t.extraHist[i] = 1 + } + } + for i, v := range t.offHist[:offsetCodeCount] { + if v == 0 { + t.offHist[i] = 1 + } + } +} + +func indexTokens(in []token) tokens { + var t tokens + t.indexTokens(in) + return t +} + +func (t *tokens) indexTokens(in []token) { + t.Reset() + for _, tok := range in { + if tok < matchType { + t.tokens[t.n] = tok + t.litHist[tok]++ + t.n++ + continue + } + t.AddMatch(uint32(tok.length()), tok.offset()) + } } -// Convert a literal into a literal token. -func literalToken(literal uint32) token { return token(literalType + literal) } +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := int(dst.n) + for i, v := range lit { + dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + dst.litHist[v]++ + } + dst.n += uint16(len(lit)) + dst.nLits += len(lit) +} -// Convert a < xlength, xoffset > pair into a match token. 
-func matchToken(xlength uint32, xoffset uint32) token {
-	return token(matchType + xlength<<lengthShift + xoffset)
-}
+func (t *tokens) AddLiteral(lit byte) {
+	t.tokens[t.n] = token(lit)
+	t.litHist[lit]++
+	t.n++
+	t.nLits++
+}
+
+// EstimatedBits will return an minimum size estimated by an *optimal*
+// compression of the block.
+// The size of the block
+func (t *tokens) EstimatedBits() int {
+	shannon := float64(0)
+	bits := int(0)
+	nMatches := 0
+	if t.nLits > 0 {
+		invTotal := 1.0 / float64(t.nLits)
+		for _, v := range t.litHist[:] {
+			if v > 0 {
+				n := float64(v)
+				shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+			}
+		}
+		// Just add 15 for EOB
+		shannon += 15
+		for _, v := range t.extraHist[1 : literalCount-256] {
+			if v > 0 {
+				n := float64(v)
+				shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+				bits += int(lengthExtraBits[v&31]) * int(v)
+				nMatches += int(v)
+			}
+		}
+	}
+	if nMatches > 0 {
+		invTotal := 1.0 / float64(nMatches)
+		for _, v := range t.offHist[:offsetCodeCount] {
+			if v > 0 {
+				n := float64(v)
+				shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+				bits += int(offsetExtraBits[v&31]) * int(n)
+			}
+		}
+	}
+
+	return int(shannon) + bits
+}
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+	if debugDecode {
+		if xlength >= maxMatchLength+baseMatchLength {
+			panic(fmt.Errorf("invalid length: %v", xlength))
+		}
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	t.nLits++
+	lengthCode := lengthCodes1[uint8(xlength)] & 31
+	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+	t.extraHist[lengthCode]++
+	t.offHist[offsetCode(xoffset)&31]++
+	t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT include the base match length.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+	if debugDecode {
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oc := offsetCode(xoffset) & 31
+	for xlength > 0 {
+		xl := xlength
+		if xl > 258 {
+			// We need to have at least baseMatchLength left over for next loop.
+			xl = 258 - baseMatchLength
+		}
+		xlength -= xl
+		xl -= 3
+		t.nLits++
+		lengthCode := lengthCodes1[uint8(xl)] & 31
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+		t.extraHist[lengthCode]++
+		t.offHist[oc]++
+		t.n++
+	}
+}
+
 // Returns the offset code corresponding to a specific offset
 func offsetCode(off uint32) uint32 {
+	if false {
+		if off < uint32(len(offsetCodes)) {
+			return offsetCodes[off&255]
+		} else if off>>7 < uint32(len(offsetCodes)) {
+			return offsetCodes[(off>>7)&255] + 14
+		} else {
+			return offsetCodes[(off>>14)&255] + 28
+		}
+	}
 	if off < uint32(len(offsetCodes)) {
-		return offsetCodes[off&255]
-	} else if off>>7 < uint32(len(offsetCodes)) {
-		return offsetCodes[(off>>7)&255] + 14
-	} else {
-		return offsetCodes[(off>>14)&255] + 28
+		return offsetCodes[uint8(off)]
 	}
+	return offsetCodes14[uint8(off>>7)]
 }
diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
index 7da7ee7486eb8..ed0cc148f8c77 100644
--- a/vendor/github.com/klauspost/compress/gzip/gzip.go
+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
@@ -22,6 +22,13 @@ const (
 	DefaultCompression = flate.DefaultCompression
 	ConstantCompression = flate.ConstantCompression
 	HuffmanOnly = flate.HuffmanOnly
+
+	// StatelessCompression will do compression but without maintaining any state
+	// between Write calls.
+	// There will be no memory kept between Write calls,
+	// but compression and speed will be suboptimal.
+	// Because of this, the size of actual Write calls will affect output size.
+	StatelessCompression = -3
 )
 
 // A Writer is an io.WriteCloser.
@@ -59,7 +66,7 @@ func NewWriter(w io.Writer) *Writer {
 // integer value between BestSpeed and BestCompression inclusive. The error
 // returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - if level < HuffmanOnly || level > BestCompression { + if level < StatelessCompression || level > BestCompression { return nil, fmt.Errorf("gzip: invalid compression level: %d", level) } z := new(Writer) @@ -69,9 +76,12 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) { func (z *Writer) init(w io.Writer, level int) { compressor := z.compressor - if compressor != nil { - compressor.Reset(w) + if level != StatelessCompression { + if compressor != nil { + compressor.Reset(w) + } } + *z = Writer{ Header: Header{ OS: 255, // unknown @@ -189,12 +199,16 @@ func (z *Writer) Write(p []byte) (int, error) { return n, z.err } } - if z.compressor == nil { + + if z.compressor == nil && z.level != StatelessCompression { z.compressor, _ = flate.NewWriter(z.w, z.level) } } z.size += uint32(len(p)) z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + if z.level == StatelessCompression { + return len(p), flate.StatelessDeflate(z.w, p, false) + } n, z.err = z.compressor.Write(p) return n, z.err } @@ -211,7 +225,7 @@ func (z *Writer) Flush() error { if z.err != nil { return z.err } - if z.closed { + if z.closed || z.level == StatelessCompression { return nil } if !z.wroteHeader { @@ -240,7 +254,11 @@ func (z *Writer) Close() error { return z.err } } - z.err = z.compressor.Close() + if z.level == StatelessCompression { + z.err = flate.StatelessDeflate(z.w, nil, true) + } else { + z.err = z.compressor.Close() + } if z.err != nil { return z.err } diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore deleted file mode 100644 index daf913b1b347a..0000000000000 --- a/vendor/github.com/klauspost/cpuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml deleted file mode 100644 index 630192d597b2e..0000000000000 --- a/vendor/github.com/klauspost/cpuid/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -sudo: false - -os: - - linux - - osx -go: - - 1.8.x - - 1.9.x - - 1.10.x - - master - -script: - - go vet ./... - - go test -v ./... - - go test -race ./... - - diff <(gofmt -d .) <("") - -matrix: - allow_failures: - - go: 'master' - fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt deleted file mode 100644 index 2ef4714f7165b..0000000000000 --- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt +++ /dev/null @@ -1,35 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
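As an aside, here is a minimal sketch (not part of the patch) of how a caller might use the new `StatelessCompression` level wired up above. Since stateless mode keeps nothing between `Write` calls, the `bufio.Writer` is our own suggestion for batching small writes, following the doc comment's note that the size of individual `Write` calls affects output size:

```go
package main

import (
	"bufio"
	"os"

	"github.com/klauspost/compress/gzip"
)

func main() {
	f, err := os.Create("out.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Each Write is deflated independently in stateless mode, so batch
	// small writes into bigger ones with a bufio.Writer.
	buf := bufio.NewWriterSize(f, 64<<10)

	gz, err := gzip.NewWriterLevel(buf, gzip.StatelessCompression)
	if err != nil {
		panic(err)
	}
	if _, err := gz.Write([]byte("hello stateless gzip\n")); err != nil {
		panic(err)
	}
	// Close emits the final stateless block and the gzip trailer.
	if err := gz.Close(); err != nil {
		panic(err)
	}
	if err := buf.Flush(); err != nil {
		panic(err)
	}
}
```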
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE deleted file mode 100644 index 5cec7ee949b10..0000000000000 --- a/vendor/github.com/klauspost/cpuid/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md deleted file mode 100644 index a7fb41fbecbc3..0000000000000 --- a/vendor/github.com/klauspost/cpuid/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# cpuid -Package cpuid provides information about the CPU running the current program. - -CPU features are detected on startup, and kept for fast access through the life of the application. -Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. - -You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
- -Package home: https://github.com/klauspost/cpuid - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg -[2]: https://godoc.org/github.com/klauspost/cpuid -[3]: https://travis-ci.org/klauspost/cpuid.svg -[4]: https://travis-ci.org/klauspost/cpuid - -# features -## CPU Instructions -* **CMOV** (i686 CMOV) -* **NX** (NX (No-Execute) bit) -* **AMD3DNOW** (AMD 3DNOW) -* **AMD3DNOWEXT** (AMD 3DNowExt) -* **MMX** (standard MMX) -* **MMXEXT** (SSE integer functions or AMD MMX ext) -* **SSE** (SSE functions) -* **SSE2** (P4 SSE functions) -* **SSE3** (Prescott SSE3 functions) -* **SSSE3** (Conroe SSSE3 functions) -* **SSE4** (Penryn SSE4.1 functions) -* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) -* **SSE42** (Nehalem SSE4.2 functions) -* **AVX** (AVX functions) -* **AVX2** (AVX2 functions) -* **FMA3** (Intel FMA 3) -* **FMA4** (Bulldozer FMA4 functions) -* **XOP** (Bulldozer XOP functions) -* **F16C** (Half-precision floating-point conversion) -* **BMI1** (Bit Manipulation Instruction Set 1) -* **BMI2** (Bit Manipulation Instruction Set 2) -* **TBM** (AMD Trailing Bit Manipulation) -* **LZCNT** (LZCNT instruction) -* **POPCNT** (POPCNT instruction) -* **AESNI** (Advanced Encryption Standard New Instructions) -* **CLMUL** (Carry-less Multiplication) -* **HTT** (Hyperthreading (enabled)) -* **HLE** (Hardware Lock Elision) -* **RTM** (Restricted Transactional Memory) -* **RDRAND** (RDRAND instruction is available) -* **RDSEED** (RDSEED instruction is available) -* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) -* **SHA** (Intel SHA Extensions) -* **AVX512F** (AVX-512 Foundation) -* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) -* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) -* **AVX512PF** (AVX-512 Prefetch Instructions) -* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) -* **AVX512CD** (AVX-512 Conflict Detection Instructions) -* **AVX512BW** (AVX-512 Byte and Word Instructions) -* **AVX512VL** (AVX-512 Vector Length Extensions) -* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) -* **MPX** (Intel MPX (Memory Protection Extensions)) -* **ERMS** (Enhanced REP MOVSB/STOSB) -* **RDTSCP** (RDTSCP Instruction) -* **CX16** (CMPXCHG16B Instruction) -* **SGX** (Software Guard Extensions, with activation details) - -## Performance -* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. -* **SSE2SLOW** (SSE2 is supported, but usually not faster) -* **SSE3SLOW** (SSE3 is supported, but usually not faster) -* **ATOM** (Atom processor, some SSSE3 instructions are slower) -* **Cache line** (Probable size of a cache line). -* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
- -## Cpu Vendor/VM -* **Intel** -* **AMD** -* **VIA** -* **Transmeta** -* **NSC** -* **KVM** (Kernel-based Virtual Machine) -* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) -* **VMware** -* **XenHVM** -* **Bhyve** -* **Hygon** - -# installing - -```go get github.com/klauspost/cpuid``` - -# example - -```Go -package main - -import ( - "fmt" - "github.com/klauspost/cpuid" -) - -func main() { - // Print basic CPU information: - fmt.Println("Name:", cpuid.CPU.BrandName) - fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) - fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) - fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) - fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) - fmt.Println("Features:", cpuid.CPU.Features) - fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) - fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") - fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") - - // Test if we have a specific feature: - if cpuid.CPU.SSE() { - fmt.Println("We have Streaming SIMD Extensions") - } -} -``` - -Sample output: -``` ->go run main.go -Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz -PhysicalCores: 2 -ThreadsPerCore: 2 -LogicalCores: 4 -Family 6 Model: 42 -Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL -Cacheline bytes: 64 -We have Streaming SIMD Extensions -``` - -# private package - -In the "private" folder you can find an autogenerated version of the library you can include in your own packages. - -For this purpose all exports are removed, and functions and constants are lowercased. - -This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. - -# license - -This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go deleted file mode 100644 index db95913212311..0000000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid.go +++ /dev/null @@ -1,1049 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// Package cpuid provides information about the CPU running the current program. -// -// CPU features are detected on startup, and kept for fast access through the life of the application. -// Currently x86 / x64 (AMD64) is supported. -// -// You can access the CPU information by accessing the shared CPU variable of the cpuid library. -// -// Package home: https://github.com/klauspost/cpuid -package cpuid - -import "strings" - -// Vendor is a representation of a CPU vendor. 
-type Vendor int - -const ( - Other Vendor = iota - Intel - AMD - VIA - Transmeta - NSC - KVM // Kernel-based Virtual Machine - MSVM // Microsoft Hyper-V or Windows Virtual PC - VMware - XenHVM - Bhyve - Hygon -) - -const ( - CMOV = 1 << iota // i686 CMOV - NX // NX (No-Execute) bit - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSSE3 // Conroe SSSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSE42 // Nehalem SSE4.2 functions - AVX // AVX functions - AVX2 // AVX2 functions - FMA3 // Intel FMA 3 - FMA4 // Bulldozer FMA4 functions - XOP // Bulldozer XOP functions - F16C // Half-precision floating-point conversion - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - TBM // AMD Trailing Bit Manipulation - LZCNT // LZCNT instruction - POPCNT // POPCNT instruction - AESNI // Advanced Encryption Standard New Instructions - CLMUL // Carry-less Multiplication - HTT // Hyperthreading (enabled) - HLE // Hardware Lock Elision - RTM // Restricted Transactional Memory - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA // Intel SHA Extensions - AVX512F // AVX-512 Foundation - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512BW // AVX-512 Byte and Word Instructions - AVX512VL // AVX-512 Vector Length Extensions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - MPX // Intel MPX (Memory Protection Extensions) - ERMS // Enhanced REP MOVSB/STOSB - RDTSCP // RDTSCP Instruction - CX16 // CMPXCHG16B Instruction - SGX // Software Guard Extensions - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - STIBP // Single Thread Indirect Branch Predictors - - // Performance indicators - SSE2SLOW // SSE2 is supported, but usually not faster - SSE3SLOW // SSE3 is supported, but usually not faster - ATOM // Atom processor, some SSSE3 instructions are slower -) - -var flagNames = map[Flags]string{ - CMOV: "CMOV", // i686 CMOV - NX: "NX", // NX (No-Execute) bit - AMD3DNOW: "AMD3DNOW", // AMD 3DNOW - AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt - MMX: "MMX", // Standard MMX - MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext - SSE: "SSE", // SSE functions - SSE2: "SSE2", // P4 SSE2 functions - SSE3: "SSE3", // Prescott SSE3 functions - SSSE3: "SSSE3", // Conroe SSSE3 functions - SSE4: "SSE4.1", // Penryn SSE4.1 functions - SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions - SSE42: "SSE4.2", // Nehalem SSE4.2 functions - AVX: "AVX", // AVX functions - AVX2: "AVX2", // AVX functions - FMA3: "FMA3", // Intel FMA 3 - FMA4: "FMA4", // Bulldozer FMA4 functions - XOP: "XOP", // Bulldozer XOP functions - F16C: "F16C", // Half-precision floating-point conversion - BMI1: "BMI1", // Bit Manipulation Instruction Set 1 - BMI2: "BMI2", // Bit Manipulation Instruction Set 2 - TBM: "TBM", // AMD Trailing Bit Manipulation - LZCNT: "LZCNT", // LZCNT instruction - POPCNT: "POPCNT", // POPCNT instruction - AESNI: "AESNI", // Advanced Encryption 
Standard New Instructions - CLMUL: "CLMUL", // Carry-less Multiplication - HTT: "HTT", // Hyperthreading (enabled) - HLE: "HLE", // Hardware Lock Elision - RTM: "RTM", // Restricted Transactional Memory - RDRAND: "RDRAND", // RDRAND instruction is available - RDSEED: "RDSEED", // RDSEED instruction is available - ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA: "SHA", // Intel SHA Extensions - AVX512F: "AVX512F", // AVX-512 Foundation - AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions - AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions - AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions - AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions - AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions - AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions - MPX: "MPX", // Intel MPX (Memory Protection Extensions) - ERMS: "ERMS", // Enhanced REP MOVSB/STOSB - RDTSCP: "RDTSCP", // RDTSCP Instruction - CX16: "CX16", // CMPXCHG16B Instruction - SGX: "SGX", // Software Guard Extensions - IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier - STIBP: "STIBP", // Single Thread Indirect Branch Predictors - - // Performance indicators - SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster - SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster - ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower - -} - -// CPUInfo contains information about the detected system CPU. -type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - Features Flags // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Cache struct { - L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected - L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected - L2 int // L2 Cache (per core or shared). Will be -1 if undetected - L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected - } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 -} - -var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) -var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) -var xgetbv func(index uint32) (eax, edx uint32) -var rdtscpAsm func() (eax, ebx, ecx, edx uint32) - -// CPU contains information about the CPU as detected on startup, -// or when Detect last was called. -// -// Use this as the primary entry point to you data, -// this way queries are -var CPU CPUInfo - -func init() { - initCPU() - Detect() -} - -// Detect will re-detect current CPU info. -// This will replace the content of the exported CPU variable. -// -// Unless you expect the CPU to change while you are running your program -// you should not need to call this function. -// If you call this, you must ensure that no other goroutine is accessing the -// exported CPU variable. 
-func Detect() { - CPU.maxFunc = maxFunctionID() - CPU.maxExFunc = maxExtendedFunction() - CPU.BrandName = brandName() - CPU.CacheLine = cacheLine() - CPU.Family, CPU.Model = familyModel() - CPU.Features = support() - CPU.SGX = hasSGX(CPU.Features&SGX != 0) - CPU.ThreadsPerCore = threadsPerCore() - CPU.LogicalCores = logicalCores() - CPU.PhysicalCores = physicalCores() - CPU.VendorID = vendorID() - CPU.cacheSize() -} - -// Generated here: http://play.golang.org/p/BxFH2Gdc0G - -// Cmov indicates support of CMOV instructions -func (c CPUInfo) Cmov() bool { - return c.Features&CMOV != 0 -} - -// Amd3dnow indicates support of AMD 3DNOW! instructions -func (c CPUInfo) Amd3dnow() bool { - return c.Features&AMD3DNOW != 0 -} - -// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions -func (c CPUInfo) Amd3dnowExt() bool { - return c.Features&AMD3DNOWEXT != 0 -} - -// MMX indicates support of MMX instructions -func (c CPUInfo) MMX() bool { - return c.Features&MMX != 0 -} - -// MMXExt indicates support of MMXEXT instructions -// (SSE integer functions or AMD MMX ext) -func (c CPUInfo) MMXExt() bool { - return c.Features&MMXEXT != 0 -} - -// SSE indicates support of SSE instructions -func (c CPUInfo) SSE() bool { - return c.Features&SSE != 0 -} - -// SSE2 indicates support of SSE 2 instructions -func (c CPUInfo) SSE2() bool { - return c.Features&SSE2 != 0 -} - -// SSE3 indicates support of SSE 3 instructions -func (c CPUInfo) SSE3() bool { - return c.Features&SSE3 != 0 -} - -// SSSE3 indicates support of SSSE 3 instructions -func (c CPUInfo) SSSE3() bool { - return c.Features&SSSE3 != 0 -} - -// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions -func (c CPUInfo) SSE4() bool { - return c.Features&SSE4 != 0 -} - -// SSE42 indicates support of SSE4.2 instructions -func (c CPUInfo) SSE42() bool { - return c.Features&SSE42 != 0 -} - -// AVX indicates support of AVX instructions -// and operating system support of AVX instructions -func (c CPUInfo) AVX() bool { - return c.Features&AVX != 0 -} - -// AVX2 indicates support of AVX2 instructions -func (c CPUInfo) AVX2() bool { - return c.Features&AVX2 != 0 -} - -// FMA3 indicates support of FMA3 instructions -func (c CPUInfo) FMA3() bool { - return c.Features&FMA3 != 0 -} - -// FMA4 indicates support of FMA4 instructions -func (c CPUInfo) FMA4() bool { - return c.Features&FMA4 != 0 -} - -// XOP indicates support of XOP instructions -func (c CPUInfo) XOP() bool { - return c.Features&XOP != 0 -} - -// F16C indicates support of F16C instructions -func (c CPUInfo) F16C() bool { - return c.Features&F16C != 0 -} - -// BMI1 indicates support of BMI1 instructions -func (c CPUInfo) BMI1() bool { - return c.Features&BMI1 != 0 -} - -// BMI2 indicates support of BMI2 instructions -func (c CPUInfo) BMI2() bool { - return c.Features&BMI2 != 0 -} - -// TBM indicates support of TBM instructions -// (AMD Trailing Bit Manipulation) -func (c CPUInfo) TBM() bool { - return c.Features&TBM != 0 -} - -// Lzcnt indicates support of LZCNT instruction -func (c CPUInfo) Lzcnt() bool { - return c.Features&LZCNT != 0 -} - -// Popcnt indicates support of POPCNT instruction -func (c CPUInfo) Popcnt() bool { - return c.Features&POPCNT != 0 -} - -// HTT indicates the processor has Hyperthreading enabled -func (c CPUInfo) HTT() bool { - return c.Features&HTT != 0 -} - -// SSE2Slow indicates that SSE2 may be slow on this processor -func (c CPUInfo) SSE2Slow() bool { - return c.Features&SSE2SLOW != 0 -} - -// SSE3Slow indicates that SSE3 may be slow on this processor 
-func (c CPUInfo) SSE3Slow() bool { - return c.Features&SSE3SLOW != 0 -} - -// AesNi indicates support of AES-NI instructions -// (Advanced Encryption Standard New Instructions) -func (c CPUInfo) AesNi() bool { - return c.Features&AESNI != 0 -} - -// Clmul indicates support of CLMUL instructions -// (Carry-less Multiplication) -func (c CPUInfo) Clmul() bool { - return c.Features&CLMUL != 0 -} - -// NX indicates support of NX (No-Execute) bit -func (c CPUInfo) NX() bool { - return c.Features&NX != 0 -} - -// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions -func (c CPUInfo) SSE4A() bool { - return c.Features&SSE4A != 0 -} - -// HLE indicates support of Hardware Lock Elision -func (c CPUInfo) HLE() bool { - return c.Features&HLE != 0 -} - -// RTM indicates support of Restricted Transactional Memory -func (c CPUInfo) RTM() bool { - return c.Features&RTM != 0 -} - -// Rdrand indicates support of RDRAND instruction is available -func (c CPUInfo) Rdrand() bool { - return c.Features&RDRAND != 0 -} - -// Rdseed indicates support of RDSEED instruction is available -func (c CPUInfo) Rdseed() bool { - return c.Features&RDSEED != 0 -} - -// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) -func (c CPUInfo) ADX() bool { - return c.Features&ADX != 0 -} - -// SHA indicates support of Intel SHA Extensions -func (c CPUInfo) SHA() bool { - return c.Features&SHA != 0 -} - -// AVX512F indicates support of AVX-512 Foundation -func (c CPUInfo) AVX512F() bool { - return c.Features&AVX512F != 0 -} - -// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions -func (c CPUInfo) AVX512DQ() bool { - return c.Features&AVX512DQ != 0 -} - -// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions -func (c CPUInfo) AVX512IFMA() bool { - return c.Features&AVX512IFMA != 0 -} - -// AVX512PF indicates support of AVX-512 Prefetch Instructions -func (c CPUInfo) AVX512PF() bool { - return c.Features&AVX512PF != 0 -} - -// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions -func (c CPUInfo) AVX512ER() bool { - return c.Features&AVX512ER != 0 -} - -// AVX512CD indicates support of AVX-512 Conflict Detection Instructions -func (c CPUInfo) AVX512CD() bool { - return c.Features&AVX512CD != 0 -} - -// AVX512BW indicates support of AVX-512 Byte and Word Instructions -func (c CPUInfo) AVX512BW() bool { - return c.Features&AVX512BW != 0 -} - -// AVX512VL indicates support of AVX-512 Vector Length Extensions -func (c CPUInfo) AVX512VL() bool { - return c.Features&AVX512VL != 0 -} - -// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions -func (c CPUInfo) AVX512VBMI() bool { - return c.Features&AVX512VBMI != 0 -} - -// MPX indicates support of Intel MPX (Memory Protection Extensions) -func (c CPUInfo) MPX() bool { - return c.Features&MPX != 0 -} - -// ERMS indicates support of Enhanced REP MOVSB/STOSB -func (c CPUInfo) ERMS() bool { - return c.Features&ERMS != 0 -} - -// RDTSCP Instruction is available. -func (c CPUInfo) RDTSCP() bool { - return c.Features&RDTSCP != 0 -} - -// CX16 indicates if CMPXCHG16B instruction is available. -func (c CPUInfo) CX16() bool { - return c.Features&CX16 != 0 -} - -// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. -// So TSX simply checks that. 
-func (c CPUInfo) TSX() bool { - return c.Features&(HLE|RTM) == HLE|RTM -} - -// Atom indicates an Atom processor -func (c CPUInfo) Atom() bool { - return c.Features&ATOM != 0 -} - -// Intel returns true if vendor is recognized as Intel -func (c CPUInfo) Intel() bool { - return c.VendorID == Intel -} - -// AMD returns true if vendor is recognized as AMD -func (c CPUInfo) AMD() bool { - return c.VendorID == AMD -} - -// Hygon returns true if vendor is recognized as Hygon -func (c CPUInfo) Hygon() bool { - return c.VendorID == Hygon -} - -// Transmeta returns true if vendor is recognized as Transmeta -func (c CPUInfo) Transmeta() bool { - return c.VendorID == Transmeta -} - -// NSC returns true if vendor is recognized as National Semiconductor -func (c CPUInfo) NSC() bool { - return c.VendorID == NSC -} - -// VIA returns true if vendor is recognized as VIA -func (c CPUInfo) VIA() bool { - return c.VendorID == VIA -} - -// RTCounter returns the 64-bit time-stamp counter -// Uses the RDTSCP instruction. The value 0 is returned -// if the CPU does not support the instruction. -func (c CPUInfo) RTCounter() uint64 { - if !c.RDTSCP() { - return 0 - } - a, _, _, d := rdtscpAsm() - return uint64(a) | (uint64(d) << 32) -} - -// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. -// This variable is OS dependent, but on Linux contains information -// about the current cpu/core the code is running on. -// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. -func (c CPUInfo) Ia32TscAux() uint32 { - if !c.RDTSCP() { - return 0 - } - _, _, ecx, _ := rdtscpAsm() - return ecx -} - -// LogicalCPU will return the Logical CPU the code is currently executing on. -// This is likely to change when the OS re-schedules the running thread -// to another CPU. -// If the current core cannot be detected, -1 will be returned. -func (c CPUInfo) LogicalCPU() int { - if c.maxFunc < 1 { - return -1 - } - _, ebx, _, _ := cpuid(1) - return int(ebx >> 24) -} - -// VM Will return true if the cpu id indicates we are in -// a virtual machine. This is only a hint, and will very likely -// have many false negatives. -func (c CPUInfo) VM() bool { - switch c.VendorID { - case MSVM, KVM, VMware, XenHVM, Bhyve: - return true - } - return false -} - -// Flags contains detected cpu features and caracteristics -type Flags uint64 - -// String returns a string representation of the detected -// CPU features. -func (f Flags) String() string { - return strings.Join(f.Strings(), ",") -} - -// Strings returns and array of the detected features. 
-func (f Flags) Strings() []string { - s := support() - r := make([]string, 0, 20) - for i := uint(0); i < 64; i++ { - key := Flags(1 << i) - val := flagNames[key] - if s&key != 0 { - r = append(r, val) - } - } - return r -} - -func maxExtendedFunction() uint32 { - eax, _, _, _ := cpuid(0x80000000) - return eax -} - -func maxFunctionID() uint32 { - a, _, _, _ := cpuid(0) - return a -} - -func brandName() string { - if maxExtendedFunction() >= 0x80000004 { - v := make([]uint32, 0, 48) - for i := uint32(0); i < 3; i++ { - a, b, c, d := cpuid(0x80000002 + i) - v = append(v, a, b, c, d) - } - return strings.Trim(string(valAsString(v...)), " ") - } - return "unknown" -} - -func threadsPerCore() int { - mfi := maxFunctionID() - if mfi < 0x4 || vendorID() != Intel { - return 1 - } - - if mfi < 0xb { - _, b, _, d := cpuid(1) - if (d & (1 << 28)) != 0 { - // v will contain logical core count - v := (b >> 16) & 255 - if v > 1 { - a4, _, _, _ := cpuid(4) - // physical cores - v2 := (a4 >> 26) + 1 - if v2 > 0 { - return int(v) / int(v2) - } - } - } - return 1 - } - _, b, _, _ := cpuidex(0xb, 0) - if b&0xffff == 0 { - return 1 - } - return int(b & 0xffff) -} - -func logicalCores() int { - mfi := maxFunctionID() - switch vendorID() { - case Intel: - // Use this on old Intel processors - if mfi < 0xb { - if mfi < 1 { - return 0 - } - // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) - // that can be assigned to logical processors in a physical package. - // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. - _, ebx, _, _ := cpuid(1) - logical := (ebx >> 16) & 0xff - return int(logical) - } - _, b, _, _ := cpuidex(0xb, 1) - return int(b & 0xffff) - case AMD, Hygon: - _, b, _, _ := cpuid(1) - return int((b >> 16) & 0xff) - default: - return 0 - } -} - -func familyModel() (int, int) { - if maxFunctionID() < 0x1 { - return 0, 0 - } - eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) -} - -func physicalCores() int { - switch vendorID() { - case Intel: - return logicalCores() / threadsPerCore() - case AMD, Hygon: - if maxExtendedFunction() >= 0x80000008 { - _, _, c, _ := cpuid(0x80000008) - return int(c&0xff) + 1 - } - } - return 0 -} - -// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID -var vendorMapping = map[string]Vendor{ - "AMDisbetter!": AMD, - "AuthenticAMD": AMD, - "CentaurHauls": VIA, - "GenuineIntel": Intel, - "TransmetaCPU": Transmeta, - "GenuineTMx86": Transmeta, - "Geode by NSC": NSC, - "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, - "Microsoft Hv": MSVM, - "VMwareVMware": VMware, - "XenVMMXenVMM": XenHVM, - "bhyve bhyve ": Bhyve, - "HygonGenuine": Hygon, -} - -func vendorID() Vendor { - _, b, c, d := cpuid(0) - v := valAsString(b, d, c) - vend, ok := vendorMapping[string(v)] - if !ok { - return Other - } - return vend -} - -func cacheLine() int { - if maxFunctionID() < 0x1 { - return 0 - } - - _, ebx, _, _ := cpuid(1) - cache := (ebx & 0xff00) >> 5 // cflush size - if cache == 0 && maxExtendedFunction() >= 0x80000006 { - _, _, ecx, _ := cpuid(0x80000006) - cache = ecx & 0xff // cacheline size - } - // TODO: Read from Cache and TLB Information - return int(cache) -} - -func (c *CPUInfo) cacheSize() { - c.Cache.L1D = -1 - c.Cache.L1I = -1 - c.Cache.L2 = -1 - c.Cache.L3 = -1 - vendor := vendorID() - switch vendor { - case Intel: - if maxFunctionID() < 4 { - 
return - } - for i := uint32(0); ; i++ { - eax, ebx, ecx, _ := cpuidex(4, i) - cacheType := eax & 15 - if cacheType == 0 { - break - } - cacheLevel := (eax >> 5) & 7 - coherency := int(ebx&0xfff) + 1 - partitions := int((ebx>>12)&0x3ff) + 1 - associativity := int((ebx>>22)&0x3ff) + 1 - sets := int(ecx) + 1 - size := associativity * partitions * coherency * sets - switch cacheLevel { - case 1: - if cacheType == 1 { - // 1 = Data Cache - c.Cache.L1D = size - } else if cacheType == 2 { - // 2 = Instruction Cache - c.Cache.L1I = size - } else { - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - case AMD, Hygon: - // Untested. - if maxExtendedFunction() < 0x80000005 { - return - } - _, _, ecx, edx := cpuid(0x80000005) - c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) - c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) - - if maxExtendedFunction() < 0x80000006 { - return - } - _, _, ecx, _ = cpuid(0x80000006) - c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) - } - - return -} - -type SGXSupport struct { - Available bool - SGX1Supported bool - SGX2Supported bool - MaxEnclaveSizeNot64 int64 - MaxEnclaveSize64 int64 -} - -func hasSGX(available bool) (rval SGXSupport) { - rval.Available = available - - if !available { - return - } - - a, _, _, d := cpuidex(0x12, 0) - rval.SGX1Supported = a&0x01 != 0 - rval.SGX2Supported = a&0x02 != 0 - rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 - rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 - - return -} - -func support() Flags { - mfi := maxFunctionID() - vend := vendorID() - if mfi < 0x1 { - return 0 - } - rval := uint64(0) - _, _, c, d := cpuid(1) - if (d & (1 << 15)) != 0 { - rval |= CMOV - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 25)) != 0 { - rval |= MMXEXT - } - if (d & (1 << 25)) != 0 { - rval |= SSE - } - if (d & (1 << 26)) != 0 { - rval |= SSE2 - } - if (c & 1) != 0 { - rval |= SSE3 - } - if (c & 0x00000200) != 0 { - rval |= SSSE3 - } - if (c & 0x00080000) != 0 { - rval |= SSE4 - } - if (c & 0x00100000) != 0 { - rval |= SSE42 - } - if (c & (1 << 25)) != 0 { - rval |= AESNI - } - if (c & (1 << 1)) != 0 { - rval |= CLMUL - } - if c&(1<<23) != 0 { - rval |= POPCNT - } - if c&(1<<30) != 0 { - rval |= RDRAND - } - if c&(1<<29) != 0 { - rval |= F16C - } - if c&(1<<13) != 0 { - rval |= CX16 - } - if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { - if threadsPerCore() > 1 { - rval |= HTT - } - } - - // Check XGETBV, OXSAVE and AVX bits - if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { - // Check for OS support - eax, _ := xgetbv(0) - if (eax & 0x6) == 0x6 { - rval |= AVX - if (c & 0x00001000) != 0 { - rval |= FMA3 - } - } - } - - // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
- if mfi >= 7 { - _, ebx, ecx, edx := cpuidex(7, 0) - if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { - rval |= AVX2 - } - if (ebx & 0x00000008) != 0 { - rval |= BMI1 - if (ebx & 0x00000100) != 0 { - rval |= BMI2 - } - } - if ebx&(1<<2) != 0 { - rval |= SGX - } - if ebx&(1<<4) != 0 { - rval |= HLE - } - if ebx&(1<<9) != 0 { - rval |= ERMS - } - if ebx&(1<<11) != 0 { - rval |= RTM - } - if ebx&(1<<14) != 0 { - rval |= MPX - } - if ebx&(1<<18) != 0 { - rval |= RDSEED - } - if ebx&(1<<19) != 0 { - rval |= ADX - } - if ebx&(1<<29) != 0 { - rval |= SHA - } - if edx&(1<<26) != 0 { - rval |= IBPB - } - if edx&(1<<27) != 0 { - rval |= STIBP - } - - // Only detect AVX-512 features if XGETBV is supported - if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { - // Check for OS support - eax, _ := xgetbv(0) - - // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and - // ZMM16-ZMM31 state are enabled by OS) - /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). - if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { - if ebx&(1<<16) != 0 { - rval |= AVX512F - } - if ebx&(1<<17) != 0 { - rval |= AVX512DQ - } - if ebx&(1<<21) != 0 { - rval |= AVX512IFMA - } - if ebx&(1<<26) != 0 { - rval |= AVX512PF - } - if ebx&(1<<27) != 0 { - rval |= AVX512ER - } - if ebx&(1<<28) != 0 { - rval |= AVX512CD - } - if ebx&(1<<30) != 0 { - rval |= AVX512BW - } - if ebx&(1<<31) != 0 { - rval |= AVX512VL - } - // ecx - if ecx&(1<<1) != 0 { - rval |= AVX512VBMI - } - } - } - } - - if maxExtendedFunction() >= 0x80000001 { - _, _, c, d := cpuid(0x80000001) - if (c & (1 << 5)) != 0 { - rval |= LZCNT - rval |= POPCNT - } - if (d & (1 << 31)) != 0 { - rval |= AMD3DNOW - } - if (d & (1 << 30)) != 0 { - rval |= AMD3DNOWEXT - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 22)) != 0 { - rval |= MMXEXT - } - if (c & (1 << 6)) != 0 { - rval |= SSE4A - } - if d&(1<<20) != 0 { - rval |= NX - } - if d&(1<<27) != 0 { - rval |= RDTSCP - } - - /* Allow for selectively disabling SSE2 functions on AMD processors - with SSE2 support but not SSE4a. This includes Athlon64, some - Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster - than SSE2 often enough to utilize this special-case flag. - AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case - so that SSE2 is used unless explicitly disabled by checking - AV_CPU_FLAG_SSE2SLOW. */ - if vendorID() != Intel && - rval&SSE2 != 0 && (c&0x00000040) == 0 { - rval |= SSE2SLOW - } - - /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be - * used unless the OS has AVX support. */ - if (rval & AVX) != 0 { - if (c & 0x00000800) != 0 { - rval |= XOP - } - if (c & 0x00010000) != 0 { - rval |= FMA4 - } - } - - if vendorID() == Intel { - family, model := familyModel() - if family == 6 && (model == 9 || model == 13 || model == 14) { - /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and - * 6/14 (core1 "yonah") theoretically support sse2, but it's - * usually slower than mmx. */ - if (rval & SSE2) != 0 { - rval |= SSE2SLOW - } - if (rval & SSE3) != 0 { - rval |= SSE3SLOW - } - } - /* The Atom processor has SSSE3 support, which is useful in many cases, - * but sometimes the SSSE3 version is slower than the SSE2 equivalent - * on the Atom, but is generally faster on other processors supporting - * SSSE3. This flag allows for selectively disabling certain SSSE3 - * functions on the Atom. 
*/ - if family == 6 && model == 28 { - rval |= ATOM - } - } - } - return Flags(rval) -} - -func valAsString(values ...uint32) []byte { - r := make([]byte, 4*len(values)) - for i, v := range values { - dst := r[i*4:] - dst[0] = byte(v & 0xff) - dst[1] = byte((v >> 8) & 0xff) - dst[2] = byte((v >> 16) & 0xff) - dst[3] = byte((v >> 24) & 0xff) - switch { - case dst[0] == 0: - return r[:i*4] - case dst[1] == 0: - return r[:i*4+1] - case dst[2] == 0: - return r[:i*4+2] - case dst[3] == 0: - return r[:i*4+3] - } - } - return r -} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s deleted file mode 100644 index 4d731711e48f2..0000000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid_386.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// +build 386,!gccgo - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s deleted file mode 100644 index 3c1d60e422125..0000000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//+build amd64,!gccgo - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmXgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go deleted file mode 100644 index a5f04dd6d0a77..0000000000000 --- a/vendor/github.com/klauspost/cpuid/detect_intel.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -// +build 386,!gccgo amd64,!gccgo - -package cpuid - -func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func asmXgetbv(index uint32) (eax, edx uint32) -func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) - -func initCPU() { - cpuid = asmCpuid - cpuidex = asmCpuidex - xgetbv = asmXgetbv - rdtscpAsm = asmRdtscpAsm -} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go deleted file mode 100644 index 909c5d9a7aed6..0000000000000 --- a/vendor/github.com/klauspost/cpuid/detect_ref.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// +build !amd64,!386 gccgo - -package cpuid - -func initCPU() { - cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } - - cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } - - xgetbv = func(index uint32) (eax, edx uint32) { - return 0, 0 - } - - rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } -} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go deleted file mode 100644 index 90e7a98d278da..0000000000000 --- a/vendor/github.com/klauspost/cpuid/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package cpuid - -//go:generate go run private-gen.go -//go:generate gofmt -w ./private diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 0000000000000..5e987350471d0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,34 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml new file mode 100644 index 0000000000000..fd6c6db713d3a --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -0,0 +1,24 @@ +language: go + +env: + - GO111MODULE=off + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + fast_finish: true + allow_failures: + - go: master + +sudo: false + +script: + - go test -v -cpu=2 + - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 0000000000000..bd899d8353dd5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 0000000000000..4ee388e81bfb9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,90 @@
+# lz4 : LZ4 compression in pure Go
+
+[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
+[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome for bug fixing, performance improvements...!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 0000000000000..ee178a992b11a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,387 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src); di >= 0 {
+		return di, nil
+	}
+	return 0, ErrInvalidSourceShortBuffer
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+// The size of hashTable must be at least 64Kb.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
+	if len(hashTable) < htSize {
+		return 0, fmt.Errorf("hash table too small, should be at least %d in size", htSize)
+	}
+	defer recoverBlock(&err)
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	// Prove to the compiler the table has at least htSize elements.
+	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
+	hashTable = hashTable[:htSize]
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, di, anchor int
+
+	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check a match at s, s+1 and s+2 and pick the first one we get.
+		// Checking 3 only requires us to load the source one.
+		ref := hashTable[h]
+		ref2 := hashTable[h2]
+		hashTable[h] = si
+		hashTable[h2] = si + 1
+		offset := si - ref
+
+		// If offset <= 0 we got an old entry in the hash table.
+		if offset <= 0 || offset >= winSize || // Out of window.
+			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref = hashTable[h]
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize ||
+				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref
+				hashTable[h] = si
+
+				if offset <= 0 || offset >= winSize ||
+					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by comparing batches of 8 bytes.
+		for si < sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		_ = dst[di] // Bound check elimination.
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		hashTable[h] = si - 2
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+// CompressBlockHC compresses the source buffer src into the destination dst
+// with max search depth (use 0 or negative value for no max).
+//
+// CompressBlockHC's compression ratio is better than CompressBlock's, but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
+	defer recoverBlock(&err)
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	var si, di int
+
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	var hashTable, chainTable [winSize]int
+
+	if depth <= 0 {
+		depth = winSize
+	}
+
+	anchor := si
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous position that has the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+			try--
+		}
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			chainTable[si&winMask] = hashTable[h]
+			hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+ return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 0000000000000..bc5e78d40f0a3 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) +} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 0000000000000..44211ad96453b --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 0000000000000..43cc14fbe2e37 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 0000000000000..20fef39759cb6 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. 
+ // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 
copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + + JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 0000000000000..919888edf7dcc --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
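+			// The match overlaps the bytes still being written (mLen > offset),
+			// so the output repeats with period `offset`. Each pass copies
+			// everything expanded so far, doubling the span (offset, 2*offset,
+			// 4*offset, ...), which needs only O(log(mLen/offset)) copy calls
+			// instead of a byte-by-byte loop.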
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:di+mLen], expanded[:mLen]) + } +} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go new file mode 100644 index 0000000000000..1c45d1813cef4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/errors.go @@ -0,0 +1,30 @@ +package lz4 + +import ( + "errors" + "fmt" + "os" + rdebug "runtime/debug" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") + // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. + ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. + ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + if debugFlag { + fmt.Fprintln(os.Stderr, r) + rdebug.PrintStack() + } + *e = ErrInvalidSourceShortBuffer + } +} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go new file mode 100644 index 0000000000000..7a76a6bce2b58 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go @@ -0,0 +1,223 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/XXH/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v1 = prime1plus2 + xxh.v2 = prime2 + xxh.v3 = 0 + xxh.v4 = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. 
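+// Input is accumulated in an internal 16-byte buffer; only once a full
+// 16-byte stripe is available is it mixed into the four accumulators
+// (v1..v4) via the xxhash32 round function.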
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
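+// It yields the same value ChecksumZero would for the 4 little-endian bytes
+// of x, without the byte-slice round trip.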
+func Uint32Zero(x uint32) uint32 { + h := prime5 + 4 + x*prime3 + h = rol17(h) * prime4 + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 0000000000000..21dcfaeb93d16 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,113 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +// +package lz4 + +import "math/bits" + +import "sync" + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +const ( + blockSize64K = 1 << (16 + 2*iota) + blockSize256K + blockSize1M + blockSize4M +) + +var ( + // Keep a pool of buffers for each valid block sizes. + bsMapValue = [...]*sync.Pool{ + newBufferPool(2 * blockSize64K), + newBufferPool(2 * blockSize256K), + newBufferPool(2 * blockSize1M), + newBufferPool(2 * blockSize4M), + } +) + +// newBufferPool returns a pool for buffers of the given size. +func newBufferPool(size int) *sync.Pool { + return &sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + } +} + +// getBuffer returns a buffer to its pool. +func getBuffer(size int) []byte { + idx := blockSizeValueToIndex(size) - 4 + return bsMapValue[idx].Get().([]byte) +} + +// putBuffer returns a buffer to its pool. 
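+// The size must be one of the four valid block sizes so the buffer lands in
+// the same pool it was drawn from by getBuffer.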
+func putBuffer(size int, buf []byte) { + if cap(buf) > 0 { + idx := blockSizeValueToIndex(size) - 4 + bsMapValue[idx].Put(buf[:cap(buf)]) + } +} +func blockSizeIndexToValue(i byte) int { + return 1 << (16 + 2*uint(i)) +} +func isValidBlockSize(size int) bool { + const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M + + return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 +} +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller responsibility to check them if necessary. +type Header struct { + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). +} + +func (h *Header) Reset() { + h.done = false +} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 0000000000000..9a0fb00709d56 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 0000000000000..12c761a2e7f97 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 0000000000000..87dd72bd0db3e --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,335 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). 
+// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + r := &Reader{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + buf := z.buf[:] + for { + magic, err := z.readUint32() + if err != nil { + z.pos += 4 + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + if magic == frameMagic { + break + } + if magic>>8 != frameSkipMagic>>8 { + return ErrInvalid + } + skipSize, err := z.readUint32() + if err != nil { + return err + } + z.pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + if err != nil { + return err + } + z.pos += m + } + + // Header. + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.pos += 8 + + b := buf[0] + if v := b >> 6; v != Version { + return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) + } + if b>>5&1 == 0 { + return ErrBlockDependency + } + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + + bmsID := buf[1] >> 4 & 0x7 + if bmsID < 4 || bmsID > 7 { + return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) + } + bSize := blockSizeIndexToValue(bmsID - 4) + z.BlockMaxSize = bSize + + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size id=%d size=%d", bmsID, bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + _, _ = z.checksum.Write(buf[0:2]) + + if frameSize { + buf := buf[:8] + if _, err := io.ReadFull(z.src, buf); err != nil { + return err + } + z.Size = binary.LittleEndian.Uint64(buf) + z.pos += 8 + _, _ = z.checksum.Write(buf) + } + + // Header checksum. + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) + } + + z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. 
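+//
+// A minimal decompression sketch (the in/out streams here are illustrative,
+// not part of the package):
+//
+//	zr := NewReader(in)
+//	if _, err := io.Copy(out, zr); err != nil {
+//		// handle the error
+//	}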
+func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") + } + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] + + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + } + + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. 
Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 + z.checksum.Reset() +} + +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. +func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 0000000000000..324f1386b8ad8 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,408 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "github.com/pierrec/lz4/internal/xxh32" + "io" + "runtime" +) + +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + // Handler called when a block has been successfully written out. + // It provides the number of bytes written. + OnBlockDone func(size int) + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + z := new(Writer) + z.Reset(dst) + return z +} + +// WithConcurrency sets the number of concurrent go routines used for compression. +// A negative value sets the concurrency to GOMAXPROCS. +func (z *Writer) WithConcurrency(n int) *Writer { + switch { + case n == 0 || n == 1: + z.c = nil + return z + case n < 0: + n = runtime.GOMAXPROCS(0) + } + z.c = make(chan chan zResult, n) + // Writer goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range z.c { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. 
+ // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + res := <-c + n := len(res.data) + if n == 0 { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Write the block. + if err := z.writeUint32(res.size); err != nil && z.err == nil { + z.err = err + } + if _, err := z.dst.Write(res.data); err != nil && z.err == nil { + z.err = err + } + if z.BlockChecksum { + if err := z.writeUint32(res.checksum); err != nil && z.err == nil { + z.err = err + } + } + if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { + // It is now safe to release the buffer as no longer in use by any goroutine. + putBuffer(cap(res.data), res.data) + } + if h := z.OnBlockDone; h != nil { + h(n) + } + close(c) + } + }() + return z +} + +// newBuffers instantiates new buffers which size matches the one in Header. +// The returned buffers are for decompression and compression respectively. +func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = blockSize4M + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + if !isValidBlockSize(z.Header.BlockMaxSize) { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + z.newBuffers() + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. 
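+//
+// A minimal compression sketch (the out writer and data slice are
+// illustrative); Close must be called to flush buffered data and write the
+// frame trailer:
+//
+//	zw := NewWriter(out)
+//	if _, err := zw.Write(data); err != nil {
+//		// handle the error
+//	}
+//	if err := zw.Close(); err != nil {
+//		// handle the error
+//	}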
+func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + go writerCompressBlock(c, z.Header, data) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + return nil + } + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := z.data[:z.idx] + z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) + return nil +} + +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. 
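+	// Reading z.err below is race-free: it was only ever written by that
+	// goroutine, and the sentinel handshake above orders those writes
+	// before this point.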
+ z.c = nil + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if z.NoChecksum { + return nil + } + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() + z.dst = w + z.checksum.Reset() + z.idx = 0 + z.err = nil + z.WithConcurrency(n) +} + +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. + var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) + } + c <- res +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 30f1902a8266d..6d085fed8aa85 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -340,11 +340,9 @@ github.com/jonboulle/clockwork github.com/jpillora/backoff # github.com/json-iterator/go v1.1.7 github.com/json-iterator/go -# github.com/klauspost/compress v1.7.4 +# github.com/klauspost/compress v1.9.4 github.com/klauspost/compress/flate github.com/klauspost/compress/gzip -# github.com/klauspost/cpuid v1.2.1 -github.com/klauspost/cpuid # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences # github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 @@ -387,6 +385,9 @@ github.com/opentracing-contrib/go-stdlib/nethttp github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible +github.com/pierrec/lz4 +github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.8.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0