diff --git a/go.mod b/go.mod index edc3abaabdc1..0815e8cfe990 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/containerd/containerd v1.3.2 // indirect github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e - github.com/cortexproject/cortex v1.0.0 + github.com/cortexproject/cortex v1.0.1-0.20200416152925-3fe04dcff1d8 github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 @@ -47,7 +47,6 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd github.com/stretchr/testify v1.5.1 - github.com/thanos-io/thanos v0.11.0 // indirect github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.20.1+incompatible github.com/ugorji/go v1.1.7 // indirect diff --git a/go.sum b/go.sum index ca8157e35838..da4333a8217b 100644 --- a/go.sum +++ b/go.sum @@ -163,8 +163,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.0.0 h1:SbvD/LBbp50bQBq+lMwYoS91I6DUMbRKaYxE6UmSEa0= -github.com/cortexproject/cortex v1.0.0/go.mod h1:KixgGK5GO7YVo48k37rvHOEQlwpDCqHSPX2Mv2IuJMY= +github.com/cortexproject/cortex v1.0.1-0.20200416152925-3fe04dcff1d8 h1:A7nGtA5pj10j5bwbLPqf5C+WAhVzFaOt1c/uen6202o= +github.com/cortexproject/cortex v1.0.1-0.20200416152925-3fe04dcff1d8/go.mod h1:5NXU+UpV8NW6I3teskVmxn45xcq4+IbtSOINfRf+jds= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -701,7 +701,6 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= -github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= @@ -767,9 +766,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod 
h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= -github.com/thanos-io/thanos v0.8.1-0.20200326105947-214ff4480e93/go.mod h1:PeLHoE5XdPZss/3eLvuxDCFXnM6Sd2Kh+saQIRJVtBE= -github.com/thanos-io/thanos v0.11.0 h1:UkWLa93sihcxCofelRH/NBGQxFyFU73eXIr2a+dwOFM= -github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= +github.com/thanos-io/thanos v0.12.1-0.20200416112106-b391ca115ed8 h1:z7sOhoCEWnrQ2MIew3cJxsaxKT0AQu5pgXA8ZjdrYlk= +github.com/thanos-io/thanos v0.12.1-0.20200416112106-b391ca115ed8/go.mod h1:+nN9AzmfaIH2e2KJGyRxX0BoUGrRSyZmp+U8ToRxlDc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index fc5773bd8101..5e30adc46bcb 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -9,8 +9,8 @@ import ( "time" cortex_distributor "github.com/cortexproject/cortex/pkg/distributor" - cortex_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" cortex_util "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" @@ -65,7 +65,7 @@ type Config struct { DistributorRing cortex_distributor.RingConfig `yaml:"ring,omitempty"` // For testing. - factory func(addr string) (grpc_health_v1.HealthClient, error) `yaml:"-"` + factory ring_client.PoolFactory `yaml:"-"` } // RegisterFlags registers the flags. @@ -79,7 +79,7 @@ type Distributor struct { clientCfg client.Config ingestersRing ring.ReadRing validator *Validator - pool *cortex_client.Pool + pool *ring_client.Pool // The global rate limiter requires a distributors ring to count // the number of healthy instances. 
@@ -93,7 +93,7 @@ type Distributor struct { func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overrides *validation.Overrides) (*Distributor, error) { factory := cfg.factory if factory == nil { - factory = func(addr string) (grpc_health_v1.HealthClient, error) { + factory = func(addr string) (ring_client.PoolClient, error) { return client.New(clientCfg, addr) } } @@ -137,7 +137,7 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr ingestersRing: ingestersRing, distributorsRing: distributorsRing, validator: validator, - pool: cortex_client.NewPool(clientCfg.PoolConfig, ingestersRing, factory, cortex_util.Logger), + pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ingestersRing, factory, cortex_util.Logger), ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 9b3c3ac2d737..8a40bb4ad760 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/consul" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -219,7 +220,7 @@ func prepare(t *testing.T, limits *validation.Limits, kvStore kv.Client) *Distri distributorConfig.DistributorRing.InstanceID = strconv.Itoa(rand.Int()) distributorConfig.DistributorRing.KVStore.Mock = kvStore distributorConfig.DistributorRing.InstanceInterfaceNames = []string{"eth0", "en0", "lo0"} - distributorConfig.factory = func(addr string) (grpc_health_v1.HealthClient, error) { + distributorConfig.factory = func(addr string) (ring_client.PoolClient, error) { return ingesters[addr], nil } @@ -260,6 +261,10 @@ func (i *mockIngester) Push(ctx context.Context, in *logproto.PushRequest, opts return nil, nil } +func (i *mockIngester) Close() error { + return nil +} + // Copied from Cortex; TODO(twilkie) - factor this our and share it. // mockRing doesn't do virtual nodes, just returns mod(key) + replicationFactor // ingesters. diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index 7cbeb59bae16..717876f29753 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -5,10 +5,10 @@ import ( "io" "time" - cortex_client "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go" "github.com/weaveworks/common/middleware" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" @@ -16,16 +16,30 @@ import ( "github.com/grafana/loki/pkg/logproto" ) +type HealthAndIngesterClient interface { + logproto.IngesterClient + grpc_health_v1.HealthClient + Close() error +} + +type ClosableHealthAndIngesterClient struct { + logproto.PusherClient + logproto.QuerierClient + logproto.IngesterClient + grpc_health_v1.HealthClient + io.Closer +} + // Config for an ingester client. 
type Config struct { - PoolConfig cortex_client.PoolConfig `yaml:"pool_config,omitempty"` - RemoteTimeout time.Duration `yaml:"remote_timeout,omitempty"` - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` + PoolConfig distributor.PoolConfig `yaml:"pool_config,omitempty"` + RemoteTimeout time.Duration `yaml:"remote_timeout,omitempty"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } // RegisterFlags registers flags. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.GRPCClientConfig.RegisterFlags("ingester.client", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", f) cfg.PoolConfig.RegisterFlags(f) f.DurationVar(&cfg.PoolConfig.RemoteTimeout, "ingester.client.healthcheck-timeout", 1*time.Second, "Timeout for healthcheck rpcs.") @@ -33,7 +47,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } // New returns a new ingester client. -func New(cfg Config, addr string) (grpc_health_v1.HealthClient, error) { +func New(cfg Config, addr string) (HealthAndIngesterClient, error) { opts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithDefaultCallOptions(cfg.GRPCClientConfig.CallOptions()...), @@ -43,13 +57,7 @@ func New(cfg Config, addr string) (grpc_health_v1.HealthClient, error) { if err != nil { return nil, err } - return struct { - logproto.PusherClient - logproto.QuerierClient - logproto.IngesterClient - grpc_health_v1.HealthClient - io.Closer - }{ + return ClosableHealthAndIngesterClient{ PusherClient: logproto.NewPusherClient(conn), QuerierClient: logproto.NewQuerierClient(conn), IngesterClient: logproto.NewIngesterClient(conn), diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index fd42629fb610..7a5b8c1bf4ee 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -62,7 +62,7 @@ type Config struct { MaxReturnedErrors int `yaml:"max_returned_stream_errors"` // For testing, you can override the address and ID of this ingester. - ingesterClientFactory func(cfg client.Config, addr string) (grpc_health_v1.HealthClient, error) + ingesterClientFactory func(cfg client.Config, addr string) (client.HealthAndIngesterClient, error) } // RegisterFlags registers the flags. 
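// -----------------------------------------------------------------------------
// A minimal sketch of the client-pool contract this change moves to; it is an
// illustration, not part of the patch. Assumptions, taken from the hunks
// above: Cortex's ring_client.PoolFactory is func(addr string) (PoolClient,
// error), and a PoolClient is a grpc_health_v1.HealthClient that can also be
// Close()d, which is why client.New now returns HealthAndIngesterClient and
// why the test mocks gain Close(). The stub and package name are hypothetical.
package poolsketch

import (
	"context"

	ring_client "github.com/cortexproject/cortex/pkg/ring/client"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/status"
)

// healthStub satisfies ring_client.PoolClient without opening a connection.
type healthStub struct{}

func (healthStub) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthCheckResponse, error) {
	// Always report SERVING so the pool keeps this client around.
	return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
}

func (healthStub) Watch(ctx context.Context, in *grpc_health_v1.HealthCheckRequest, opts ...grpc.CallOption) (grpc_health_v1.Health_WatchClient, error) {
	return nil, status.Error(codes.Unimplemented, "watch is not supported by this stub")
}

// Close is what distinguishes a PoolClient from a plain HealthClient; the
// pool calls it when it drops unhealthy or departed ingesters.
func (healthStub) Close() error { return nil }

// stubFactory could be assigned to the distributor Config.factory field in
// tests, mirroring what distributor_test.go does with its mock ingesters.
var stubFactory ring_client.PoolFactory = func(addr string) (ring_client.PoolClient, error) {
	return healthStub{}, nil
}
// -----------------------------------------------------------------------------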
diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go index ca1092e5b1af..887829bfeb23 100644 --- a/pkg/ingester/transfer_test.go +++ b/pkg/ingester/transfer_test.go @@ -12,13 +12,13 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/go-kit/kit/log/level" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/user" "golang.org/x/net/context" "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/loki/pkg/ingester/client" "github.com/grafana/loki/pkg/logproto" @@ -141,19 +141,13 @@ func (f *testIngesterFactory) getIngester(joinAfter time.Duration, t *testing.T) cfg.LifecyclerConfig.JoinAfter = joinAfter cfg.LifecyclerConfig.Addr = cfg.LifecyclerConfig.ID - cfg.ingesterClientFactory = func(cfg client.Config, addr string) (grpc_health_v1.HealthClient, error) { + cfg.ingesterClientFactory = func(cfg client.Config, addr string) (client.HealthAndIngesterClient, error) { ingester, ok := f.ingesters[addr] if !ok { return nil, fmt.Errorf("no ingester %s", addr) } - return struct { - logproto.PusherClient - logproto.QuerierClient - logproto.IngesterClient - grpc_health_v1.HealthClient - io.Closer - }{ + return client.ClosableHealthAndIngesterClient{ PusherClient: nil, QuerierClient: nil, IngesterClient: &testIngesterClient{t: f.t, i: ingester}, diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 9da654cdf5bf..4c5fa670957c 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -6,7 +6,6 @@ import ( "net/http" "time" - "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -14,9 +13,11 @@ import ( "github.com/weaveworks/common/user" "google.golang.org/grpc/health/grpc_health_v1" - cortex_client "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" cortex_validation "github.com/cortexproject/cortex/pkg/util/validation" "github.com/grafana/loki/pkg/ingester/client" @@ -59,7 +60,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { type Querier struct { cfg Config ring ring.ReadRing - pool *cortex_client.Pool + pool *ring_client.Pool store storage.Store engine logql.Engine limits *validation.Overrides @@ -67,7 +68,7 @@ type Querier struct { // New makes a new Querier. func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, store storage.Store, limits *validation.Overrides) (*Querier, error) { - factory := func(addr string) (grpc_health_v1.HealthClient, error) { + factory := func(addr string) (ring_client.PoolClient, error) { return client.New(clientCfg, addr) } @@ -76,11 +77,11 @@ func New(cfg Config, clientCfg client.Config, ring ring.ReadRing, store storage. 
// newQuerier creates a new Querier and allows to pass a custom ingester client factory // used for testing purposes -func newQuerier(cfg Config, clientCfg client.Config, clientFactory cortex_client.Factory, ring ring.ReadRing, store storage.Store, limits *validation.Overrides) (*Querier, error) { +func newQuerier(cfg Config, clientCfg client.Config, clientFactory ring_client.PoolFactory, ring ring.ReadRing, store storage.Store, limits *validation.Overrides) (*Querier, error) { querier := Querier{ cfg: cfg, ring: ring, - pool: cortex_client.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger), + pool: distributor.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger), store: store, limits: limits, } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index a7b4afd9b0a1..730cfcc6798b 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -6,11 +6,12 @@ import ( "fmt" "time" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/chunk" - cortex_client "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -70,10 +71,14 @@ func (c *querierClientMock) Context() context.Context { return context.Background() } +func (c *querierClientMock) Close() error { + return nil +} + // newIngesterClientMockFactory creates a factory function always returning // the input querierClientMock -func newIngesterClientMockFactory(c *querierClientMock) cortex_client.Factory { - return func(addr string) (grpc_health_v1.HealthClient, error) { +func newIngesterClientMockFactory(c *querierClientMock) ring_client.PoolFactory { + return func(addr string) (ring_client.PoolClient, error) { return c, nil } } @@ -81,7 +86,7 @@ func newIngesterClientMockFactory(c *querierClientMock) cortex_client.Factory { // mockIngesterClientConfig returns an ingester client config suitable for testing func mockIngesterClientConfig() client.Config { return client.Config{ - PoolConfig: cortex_client.PoolConfig{ + PoolConfig: distributor.PoolConfig{ ClientCleanupPeriod: 1 * time.Minute, HealthCheckIngesters: false, RemoteTimeout: 1 * time.Second, diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 809145aa2849..fe6f15f8d673 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -36,8 +36,8 @@ var ( CacheConfig: cache.Config{ EnableFifoCache: true, Fifocache: cache.FifoCacheConfig{ - Size: 1024, - Validity: 24 * time.Hour, + MaxSizeItems: 1024, + Validity: 24 * time.Hour, }, }, }, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index a7b1e84f6df1..d4395038c4ad 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -755,8 +755,14 @@ func (b dynamoDBWriteBatch) Add(tableName, hashValue string, rangeValue []byte, } func (b dynamoDBWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { - // ToDo: implement this to support 
deleting index entries from DynamoDB - panic("DynamoDB does not support Deleting index entries yet") + b[tableName] = append(b[tableName], &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ + hashKey: {S: aws.String(hashValue)}, + rangeKey: {B: rangeValue}, + }, + }, + }) } // Fill 'b' with WriteRequests from 'from' until 'b' has at most max requests. Remove those requests from 'from'. diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index f47be182cd46..a8e7c2c6c2fc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -68,8 +68,9 @@ func New(cfg Config) (Cache, error) { cfg.Fifocache.Validity = cfg.DefaultValidity } - cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache) - caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache)) + if cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache); cache != nil { + caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache)) + } } if (cfg.MemcacheClient.Host != "" || cfg.MemcacheClient.Addresses != "") && cfg.Redis.Endpoint != "" { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index 507def741874..e8302862a820 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -1,16 +1,19 @@ package cache import ( + "container/list" "context" "flag" "sync" "time" "unsafe" + "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" ) var ( @@ -71,29 +74,45 @@ var ( }, []string{"cache"}) ) +const ( + elementSize = int(unsafe.Sizeof(list.Element{})) + elementPrtSize = int(unsafe.Sizeof(&list.Element{})) +) + +// This FIFO cache implementation supports two eviction methods - based on number of items in the cache, and based on memory usage. +// For the memory-based eviction, set FifoCacheConfig.MaxSizeBytes to a positive integer, indicating upper limit of memory allocated by items in the cache. +// Alternatively, set FifoCacheConfig.MaxSizeItems to a positive integer, indicating maximum number of items in the cache. +// If both parameters are set, both methods are enforced, whichever hits first. + // FifoCacheConfig holds config for the FifoCache. 
type FifoCacheConfig struct { - Size int `yaml:"size"` - Validity time.Duration `yaml:"validity"` + MaxSizeBytes int `yaml:"max_size_bytes"` + MaxSizeItems int `yaml:"max_size_items"` + Validity time.Duration `yaml:"validity"` + + DeprecatedSize int `yaml:"size"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (cfg *FifoCacheConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { - f.IntVar(&cfg.Size, prefix+"fifocache.size", 0, description+"The number of entries to cache.") + f.IntVar(&cfg.MaxSizeBytes, prefix+"fifocache.max-size-bytes", 0, description+"Maximum memory size of the cache.") + f.IntVar(&cfg.MaxSizeItems, prefix+"fifocache.max-size-items", 0, description+"Maximum number of entries in the cache.") f.DurationVar(&cfg.Validity, prefix+"fifocache.duration", 0, description+"The expiry duration for the cache.") + + f.IntVar(&cfg.DeprecatedSize, prefix+"fifocache.size", 0, "Deprecated (use max-size-items or max-size-bytes instead): "+description+"The number of entries to cache. ") } // FifoCache is a simple string -> interface{} cache which uses a fifo slide to // manage evictions. O(1) inserts and updates, O(1) gets. type FifoCache struct { - lock sync.RWMutex - size int - validity time.Duration - entries []cacheEntry - index map[string]int + lock sync.RWMutex + maxSizeItems int + maxSizeBytes int + currSizeBytes int + validity time.Duration - // indexes into entries to identify the most recent and least recent entry. - first, last int + entries map[string]*list.Element + lru *list.List entriesAdded prometheus.Counter entriesAddedNew prometheus.Counter @@ -106,10 +125,9 @@ type FifoCache struct { } type cacheEntry struct { - updated time.Time - key string - value interface{} - prev, next int + updated time.Time + key string + value []byte } // NewFifoCache returns a new initialised FifoCache of size. @@ -117,11 +135,22 @@ type cacheEntry struct { func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { util.WarnExperimentalUse("In-memory (FIFO) cache") - cache := &FifoCache{ - size: cfg.Size, - validity: cfg.Validity, - entries: make([]cacheEntry, 0, cfg.Size), - index: make(map[string]int, cfg.Size), + if cfg.DeprecatedSize > 0 { + flagext.DeprecatedFlagsUsed.Inc() + level.Warn(util.Logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name) + cfg.MaxSizeItems = cfg.DeprecatedSize + } + if cfg.MaxSizeBytes == 0 && cfg.MaxSizeItems == 0 { + // zero cache capacity - no need to create cache + level.Warn(util.Logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name) + return nil + } + return &FifoCache{ + maxSizeItems: cfg.MaxSizeItems, + maxSizeBytes: cfg.MaxSizeBytes, + validity: cfg.Validity, + entries: make(map[string]*list.Element), + lru: list.New(), // TODO(bwplotka): There might be simple cache.Cache wrapper for those. entriesAdded: cacheEntriesAdded.WithLabelValues(name), @@ -133,9 +162,6 @@ func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { staleGets: cacheStaleGets.WithLabelValues(name), memoryBytes: cacheMemoryBytes.WithLabelValues(name), } - // set initial memory allocation - cache.memoryBytes.Set(float64(int(unsafe.Sizeof(cacheEntry{})) * cache.size)) - return cache } // Fetch implements Cache. 
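// -----------------------------------------------------------------------------
// A stripped-down restatement of the eviction rule the rewritten FifoCache
// introduces: two independent limits (MaxSizeItems, MaxSizeBytes), an
// LRU-ordered container/list, and eviction from the back until both limits
// hold. This is a hypothetical, self-contained sketch of the put() logic in
// the following hunk; it sizes an entry as len(value) for brevity, while the
// real sizeOf() also charges the key, the cacheEntry struct, the list element,
// and the map pointer.
package fifosketch

import "container/list"

type entry struct {
	key   string
	value []byte
}

type sketchCache struct {
	maxItems  int // 0 disables the item-count limit
	maxBytes  int // 0 disables the byte limit
	currBytes int
	entries   map[string]*list.Element
	lru       *list.List
}

func newSketchCache(maxItems, maxBytes int) *sketchCache {
	return &sketchCache{
		maxItems: maxItems,
		maxBytes: maxBytes,
		entries:  make(map[string]*list.Element),
		lru:      list.New(),
	}
}

func (c *sketchCache) put(key string, value []byte) {
	// An update is handled as remove-then-insert, as in the patched put().
	if el, ok := c.entries[key]; ok {
		old := c.lru.Remove(el).(*entry)
		delete(c.entries, key)
		c.currBytes -= len(old.value)
	}

	// An item larger than the whole byte budget is never cached.
	if c.maxBytes > 0 && len(value) > c.maxBytes {
		return
	}

	// Evict from the LRU tail while either limit would be exceeded; the
	// "whichever hits first" in the comment above is this disjunction.
	for (c.maxBytes > 0 && c.currBytes+len(value) > c.maxBytes) ||
		(c.maxItems > 0 && len(c.entries) >= c.maxItems) {
		last := c.lru.Back()
		if last == nil {
			break
		}
		evicted := c.lru.Remove(last).(*entry)
		delete(c.entries, evicted.key)
		c.currBytes -= len(evicted.value)
	}

	c.entries[key] = c.lru.PushFront(&entry{key: key, value: value})
	c.currBytes += len(value)
}

// newSketchCache(n, 0) reproduces the deprecated fifocache.size behaviour
// (item count only), matching the DeprecatedSize to MaxSizeItems mapping above.
// -----------------------------------------------------------------------------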
@@ -149,125 +175,101 @@ func (c *FifoCache) Fetch(ctx context.Context, keys []string) (found []string, b } found = append(found, key) - bufs = append(bufs, val.([]byte)) + bufs = append(bufs, val) } - return } // Store implements Cache. -func (c *FifoCache) Store(ctx context.Context, keys []string, bufs [][]byte) { - values := make([]interface{}, 0, len(bufs)) - for _, buf := range bufs { - values = append(values, buf) - } - c.Put(ctx, keys, values) -} - -// Stop implements Cache. -func (c *FifoCache) Stop() { -} - -// Put stores the value against the key. -func (c *FifoCache) Put(ctx context.Context, keys []string, values []interface{}) { +func (c *FifoCache) Store(ctx context.Context, keys []string, values [][]byte) { c.entriesAdded.Inc() - if c.size == 0 { - return - } c.lock.Lock() defer c.lock.Unlock() for i := range keys { - c.put(ctx, keys[i], values[i]) + c.put(keys[i], values[i]) } } -func (c *FifoCache) put(ctx context.Context, key string, value interface{}) { - // See if we already have the entry - index, ok := c.index[key] - if ok { - entry := c.entries[index] - deltaSize := sizeOf(value) - sizeOf(entry.value) +// Stop implements Cache. +func (c *FifoCache) Stop() { + c.lock.Lock() + defer c.lock.Unlock() - entry.updated = time.Now() - entry.value = value + c.entriesEvicted.Add(float64(c.lru.Len())) - // Remove this entry from the FIFO linked-list. - c.entries[entry.prev].next = entry.next - c.entries[entry.next].prev = entry.prev + c.entries = make(map[string]*list.Element) + c.lru.Init() + c.currSizeBytes = 0 - // Corner case: updating last element - if c.last == index { - c.last = entry.prev - } + c.entriesCurrent.Set(float64(0)) + c.memoryBytes.Set(float64(0)) +} - // Insert it at the beginning - entry.next = c.first - entry.prev = c.last - c.entries[entry.next].prev = index - c.entries[entry.prev].next = index - c.first = index +func (c *FifoCache) put(key string, value []byte) { + // See if we already have the item in the cache. + element, ok := c.entries[key] + if ok { + // Remove the item from the cache. + entry := c.lru.Remove(element).(*cacheEntry) + delete(c.entries, key) + c.currSizeBytes -= sizeOf(entry) + c.entriesCurrent.Dec() + } - c.entries[index] = entry - c.memoryBytes.Add(float64(deltaSize)) + entry := &cacheEntry{ + updated: time.Now(), + key: key, + value: value, + } + entrySz := sizeOf(entry) + + if c.maxSizeBytes > 0 && entrySz > c.maxSizeBytes { + // Cannot keep this item in the cache. + if ok { + // We do not replace this item. + c.entriesEvicted.Inc() + } + c.memoryBytes.Set(float64(c.currSizeBytes)) return } - c.entriesAddedNew.Inc() - // Otherwise, see if we need to evict an entry. - if len(c.entries) >= c.size { + // Otherwise, see if we need to evict item(s). + for (c.maxSizeBytes > 0 && c.currSizeBytes+entrySz > c.maxSizeBytes) || (c.maxSizeItems > 0 && len(c.entries) >= c.maxSizeItems) { + lastElement := c.lru.Back() + if lastElement == nil { + break + } + evicted := c.lru.Remove(lastElement).(*cacheEntry) + delete(c.entries, evicted.key) + c.currSizeBytes -= sizeOf(evicted) + c.entriesCurrent.Dec() c.entriesEvicted.Inc() - index = c.last - entry := c.entries[index] - deltaSize := sizeOf(key) + sizeOf(value) - sizeOf(entry.key) - sizeOf(entry.value) - - c.last = entry.prev - c.first = index - delete(c.index, entry.key) - c.index[key] = index - - entry.updated = time.Now() - entry.value = value - entry.key = key - c.entries[index] = entry - c.memoryBytes.Add(float64(deltaSize)) - return } - // Finally, no hit and we have space. 
- index = len(c.entries) - c.entries = append(c.entries, cacheEntry{ - updated: time.Now(), - key: key, - value: value, - prev: c.last, - next: c.first, - }) - c.entries[c.first].prev = index - c.entries[c.last].next = index - c.first = index - c.index[key] = index - - c.memoryBytes.Add(float64(sizeOf(key) + sizeOf(value))) + // Finally, we have space to add the item. + c.entries[key] = c.lru.PushFront(entry) + c.currSizeBytes += entrySz + if !ok { + c.entriesAddedNew.Inc() + } c.entriesCurrent.Inc() + c.memoryBytes.Set(float64(c.currSizeBytes)) } // Get returns the stored value against the key and when the key was last updated. -func (c *FifoCache) Get(ctx context.Context, key string) (interface{}, bool) { +func (c *FifoCache) Get(ctx context.Context, key string) ([]byte, bool) { c.totalGets.Inc() - if c.size == 0 { - return nil, false - } c.lock.RLock() defer c.lock.RUnlock() - index, ok := c.index[key] + element, ok := c.entries[key] if ok { - updated := c.entries[index].updated - if c.validity == 0 || time.Since(updated) < c.validity { - return c.entries[index].value, true + entry := element.Value.(*cacheEntry) + if c.validity == 0 || time.Since(entry.updated) < c.validity { + return entry.value, true } c.totalMisses.Inc() @@ -279,38 +281,10 @@ func (c *FifoCache) Get(ctx context.Context, key string) (interface{}, bool) { return nil, false } -func sizeOf(i interface{}) int { - switch v := i.(type) { - case string: - return len(v) - case []int8: - return len(v) - case []uint8: - return len(v) - case []int32: - return len(v) * 4 - case []uint32: - return len(v) * 4 - case []float32: - return len(v) * 4 - case []int64: - return len(v) * 8 - case []uint64: - return len(v) * 8 - case []float64: - return len(v) * 8 - // next 2 cases are machine dependent - case []int: - if l := len(v); l > 0 { - return int(unsafe.Sizeof(v[0])) * l - } - return 0 - case []uint: - if l := len(v); l > 0 { - return int(unsafe.Sizeof(v[0])) * l - } - return 0 - default: - return int(unsafe.Sizeof(i)) - } +func sizeOf(item *cacheEntry) int { + return int(unsafe.Sizeof(*item)) + // size of cacheEntry + len(item.key) + // size of key + cap(item.value) + // size of value + elementSize + // size of the element in linked list + elementPrtSize // size of the pointer to an element in the map } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index 6a5307d6d160..ce2b662cd9fc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -3,6 +3,7 @@ package cassandra import ( "bytes" "context" + "crypto/tls" "flag" "fmt" "io/ioutil" @@ -68,6 +69,9 @@ func (cfg *Config) Validate() error { if cfg.Password.Value != "" && cfg.PasswordFile != "" { return errors.Errorf("The password and password_file config options are mutually exclusive.") } + if cfg.SSL && cfg.HostVerification && len(strings.Split(cfg.Addresses, ",")) != 1 { + return errors.Errorf("Host verification is only possible for a single host.") + } return nil } @@ -118,9 +122,18 @@ func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error { cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup if cfg.SSL { - cluster.SslOpts = &gocql.SslOptions{ - CaPath: cfg.CAPath, - EnableHostVerification: cfg.HostVerification, + if cfg.HostVerification { + cluster.SslOpts = &gocql.SslOptions{ + CaPath: cfg.CAPath, + 
EnableHostVerification: true, + Config: &tls.Config{ + ServerName: strings.Split(cfg.Addresses, ",")[0], + }, + } + } else { + cluster.SslOpts = &gocql.SslOptions{ + EnableHostVerification: false, + } } } if cfg.Auth { @@ -210,6 +223,7 @@ func (s *StorageClient) Stop() { // atomic writes. Therefore we just do a bunch of writes in parallel. type writeBatch struct { entries []chunk.IndexEntry + deletes []chunk.IndexEntry } // NewWriteBatch implement chunk.IndexClient. @@ -227,8 +241,11 @@ func (b *writeBatch) Add(tableName, hashValue string, rangeValue []byte, value [ } func (b *writeBatch) Delete(tableName, hashValue string, rangeValue []byte) { - // ToDo: implement this to support deleting index entries from Cassandra - panic("Cassandra does not support Deleting index entries yet") + b.deletes = append(b.deletes, chunk.IndexEntry{ + TableName: tableName, + HashValue: hashValue, + RangeValue: rangeValue, + }) } // BatchWrite implement chunk.IndexClient. @@ -243,6 +260,14 @@ func (s *StorageClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch) } } + for _, entry := range b.deletes { + err := s.session.Query(fmt.Sprintf("DELETE FROM %s WHERE hash = ? and range = ?", + entry.TableName), entry.HashValue, entry.RangeValue).WithContext(ctx).Exec() + if err != nil { + return errors.WithStack(err) + } + } + return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index ccd45b7395d8..02cc7da879ba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -49,10 +49,10 @@ type StoreConfig struct { ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"` - CacheLookupsOlderThan time.Duration `yaml:"cache_lookups_older_than"` + CacheLookupsOlderThan model.Duration `yaml:"cache_lookups_older_than"` // Limits query start time to be greater than now() - MaxLookBackPeriod, if set. - MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"` + MaxLookBackPeriod model.Duration `yaml:"max_look_back_period"` // Not visible in yaml because the setting shouldn't be common between ingesters and queriers. // This exists in case we don't want to cache all the chunks but still want to take advantage of @@ -67,28 +67,27 @@ func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f) - f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.") - f.DurationVar(&cfg.MaxLookBackPeriod, "store.max-look-back-period", 0, "Limit how long back data can be queried") + f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 
0 to disable.") + f.Var(&cfg.MaxLookBackPeriod, "store.max-look-back-period", "Limit how long back data can be queried") } -// store implements Store -type store struct { +type baseStore struct { cfg StoreConfig index IndexClient chunks Client - schema Schema + schema BaseSchema limits StoreLimits *Fetcher } -func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) { +func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (baseStore, error) { fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks) if err != nil { - return nil, err + return baseStore{}, err } - return &store{ + return baseStore{ cfg: cfg, index: index, chunks: chunks, @@ -98,10 +97,29 @@ func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, }, nil } +// store implements Store +type store struct { + baseStore + schema StoreSchema +} + +func newStore(cfg StoreConfig, schema StoreSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) { + rs, err := newBaseStore(cfg, schema, index, chunks, limits, chunksCache) + if err != nil { + return nil, err + } + + return &store{ + baseStore: rs, + schema: schema, + }, nil +} + // Stop any background goroutines (ie in the cache.) func (c *store) Stop() { c.storage.Stop() c.Fetcher.Stop() + c.index.Stop() } // Put implements ChunkStore @@ -186,7 +204,7 @@ func (c *store) GetChunkRefs(ctx context.Context, userID string, from, through m } // LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c *store) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) { +func (c *baseStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues") defer log.Span.Finish() level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName) @@ -252,7 +270,7 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from return labelNamesFromChunks(allChunks), nil } -func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { +func (c *baseStore) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { //nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange") defer log.Span.Finish() @@ -275,7 +293,7 @@ func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from } if c.cfg.MaxLookBackPeriod != 0 { - oldestStartTime := model.Now().Add(-c.cfg.MaxLookBackPeriod) + oldestStartTime := model.Now().Add(-time.Duration(c.cfg.MaxLookBackPeriod)) if oldestStartTime.After(*from) { *from = oldestStartTime } @@ -290,7 +308,7 @@ func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from return false, nil } -func (c *store) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { +func 
(c *baseStore) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { log, ctx := spanlogger.New(ctx, "store.validateQuery") defer log.Span.Finish() @@ -378,36 +396,12 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, fro incomingErrors := make(chan error) for _, matcher := range matchers { go func(matcher *labels.Matcher) { - // Lookup IndexQuery's - var queries []IndexQuery - var err error - if matcher.Type != labels.MatchEqual { - queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name) - } else { - queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value) - } - if err != nil { - incomingErrors <- err - return - } - level.Debug(log).Log("matcher", matcher, "queries", len(queries)) - - // Lookup IndexEntry's - entries, err := c.lookupEntriesByQueries(ctx, queries) + chunkIDs, err := c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, nil) if err != nil { incomingErrors <- err - return - } - level.Debug(log).Log("matcher", matcher, "entries", len(entries)) - - // Convert IndexEntry's to chunk IDs, filter out non-matchers at the same time. - chunkIDs, err := c.parseIndexEntries(ctx, entries, matcher) - if err != nil { - incomingErrors <- err - return + } else { + incomingChunkIDs <- chunkIDs } - level.Debug(log).Log("matcher", matcher, "chunkIDs", len(chunkIDs)) - incomingChunkIDs <- chunkIDs }(matcher) } @@ -435,7 +429,62 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, fro return c.convertChunkIDsToChunks(ctx, userID, chunkIDs) } -func (c *store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { +func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]IndexQuery) []IndexQuery) ([]string, error) { + log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", matcher) + defer log.Span.Finish() + + var err error + var queries []IndexQuery + var labelName string + if matcher == nil { + queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName) + } else if matcher.Type == labels.MatchEqual { + labelName = matcher.Name + queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value) + } else if matcher.Type == labels.MatchRegexp && len(findSetMatches(matcher.Value)) > 0 { + set := findSetMatches(matcher.Value) + for _, v := range set { + var qs []IndexQuery + qs, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, v) + if err != nil { + break + } + queries = append(queries, qs...) 
+ } + } else { + labelName = matcher.Name + queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name) + } + if err != nil { + return nil, err + } + level.Debug(log).Log("matcher", matcher, "queries", len(queries)) + + if filter != nil { + queries = filter(queries) + level.Debug(log).Log("matcher", matcher, "filteredQueries", len(queries)) + } + + entries, err := c.lookupEntriesByQueries(ctx, queries) + if e, ok := err.(CardinalityExceededError); ok { + e.MetricName = metricName + e.LabelName = labelName + return nil, e + } else if err != nil { + return nil, err + } + level.Debug(log).Log("matcher", matcher, "entries", len(entries)) + + ids, err := c.parseIndexEntries(ctx, entries, matcher) + if err != nil { + return nil, err + } + level.Debug(log).Log("matcher", matcher, "ids", len(ids)) + + return ids, nil +} + +func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { log, ctx := spanlogger.New(ctx, "store.lookupEntriesByQueries") defer log.Span.Finish() @@ -461,7 +510,7 @@ func (c *store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery return entries, err } -func (c *store) parseIndexEntries(ctx context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) { +func (c *baseStore) parseIndexEntries(ctx context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) { result := make([]string, 0, len(entries)) for _, entry := range entries { chunkKey, labelValue, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) @@ -480,7 +529,7 @@ func (c *store) parseIndexEntries(ctx context.Context, entries []IndexEntry, mat return result, nil } -func (c *store) convertChunkIDsToChunks(ctx context.Context, userID string, chunkIDs []string) ([]Chunk, error) { +func (c *baseStore) convertChunkIDsToChunks(ctx context.Context, userID string, chunkIDs []string) ([]Chunk, error) { chunkSet := make([]Chunk, 0, len(chunkIDs)) for _, chunkID := range chunkIDs { chunk, err := ParseExternalKey(userID, chunkID) @@ -509,7 +558,7 @@ func (c *store) DeleteChunk(ctx context.Context, from, through model.Time, userI }) } -func (c *store) deleteChunk(ctx context.Context, +func (c *baseStore) deleteChunk(ctx context.Context, userID string, chunkID string, metric labels.Labels, @@ -551,7 +600,7 @@ func (c *store) deleteChunk(ctx context.Context, return nil } -func (c *store) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error { +func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error { chunk, err := ParseExternalKey(userID, chunkID) if err != nil { return errors.Wrap(err, "when parsing external key") diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index 78cd7c14fe49..c18a38a16725 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -40,7 +40,7 @@ func labelNamesFromChunks(chunks []Chunk) []string { var result UniqueStrings for _, c := range chunks { for _, l := range c.Metric { - result.Add(string(l.Name)) + result.Add(l.Name) } } return result.Strings() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 366ca1de86dc..2dc4b155bb46 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -2,6 +2,7 @@ package chunk import ( "context" + "errors" "sort" "time" @@ -59,19 +60,32 @@ func NewCompositeStore() CompositeStore { // AddPeriod adds the configuration for a period of time to the CompositeStore func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) error { - schema := cfg.CreateSchema() - var store Store - var err error - switch cfg.Schema { - case "v9", "v10", "v11": - store, err = newSeriesStore(storeCfg, schema, index, chunks, limits, chunksCache, writeDedupeCache) + schema, err := cfg.CreateSchema() + if err != nil { + return err + } + + return c.addSchema(storeCfg, schema, cfg.From.Time, index, chunks, limits, chunksCache, writeDedupeCache) +} + +func (c *CompositeStore) addSchema(storeCfg StoreConfig, schema BaseSchema, start model.Time, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) error { + var ( + err error + store Store + ) + + switch s := schema.(type) { + case SeriesStoreSchema: + store, err = newSeriesStore(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache) + case StoreSchema: + store, err = newStore(storeCfg, s, index, chunks, limits, chunksCache) default: - store, err = newStore(storeCfg, schema, index, chunks, limits, chunksCache) + err = errors.New("invalid schema type") } if err != nil { return err } - c.stores = append(c.stores, compositeStoreEntry{start: model.TimeFromUnixNano(cfg.From.UnixNano()), Store: store}) + c.stores = append(c.stores, compositeStoreEntry{start: start, Store: store}) return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index 39a7f44c8788..9e2628825e6b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -51,7 +51,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.TableCacheEnabled, "bigtable.table-cache.enabled", true, "If enabled, once a tables info is fetched, it is cached.") f.DurationVar(&cfg.TableCacheExpiration, "bigtable.table-cache.expiration", 30*time.Minute, "Duration to cache tables before checking again.") - cfg.GRPCClientConfig.RegisterFlags("bigtable", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("bigtable", f) } // storageClientColumnKey implements chunk.storageClient for GCP. 
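// -----------------------------------------------------------------------------
// The composite-store change above replaces a schema-version string switch
// ("v9", "v10", "v11" went to the series store, everything else to the plain
// store) with a type switch on whatever CreateSchema returned. Below is a
// hypothetical, self-contained restatement of that dispatch, using local
// stand-in interfaces rather than the real chunk.BaseSchema / StoreSchema /
// SeriesStoreSchema; only the shape of the switch mirrors the diff.
package dispatchsketch

import "errors"

type baseSchema interface{ buckets() }

// storeSchema carries the legacy write path (GetWriteEntries in the real code).
type storeSchema interface {
	baseSchema
	writeEntries()
}

// seriesStoreSchema carries the v9+ write path (label and chunk write entries).
type seriesStoreSchema interface {
	baseSchema
	labelWriteEntries()
	chunkWriteEntries()
}

// pickStore mirrors addSchema: the concrete schema type, not a version string,
// now decides which store is built, and anything else fails at construction.
func pickStore(s baseSchema) (string, error) {
	switch s.(type) {
	case seriesStoreSchema:
		return "seriesStore", nil
	case storeSchema:
		return "store", nil
	default:
		return "", errors.New("invalid schema type")
	}
}
// -----------------------------------------------------------------------------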
@@ -151,6 +151,18 @@ type bigtableWriteBatch struct { } func (b bigtableWriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { + b.addMutation(tableName, hashValue, rangeValue, func(mutation *bigtable.Mutation, columnKey string) { + mutation.Set(columnFamily, columnKey, 0, value) + }) +} + +func (b bigtableWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { + b.addMutation(tableName, hashValue, rangeValue, func(mutation *bigtable.Mutation, columnKey string) { + mutation.DeleteCellsInColumn(columnFamily, columnKey) + }) +} + +func (b bigtableWriteBatch) addMutation(tableName, hashValue string, rangeValue []byte, callback func(mutation *bigtable.Mutation, columnKey string)) { rows, ok := b.tables[tableName] if !ok { rows = map[string]*bigtable.Mutation{} @@ -164,12 +176,7 @@ func (b bigtableWriteBatch) Add(tableName, hashValue string, rangeValue []byte, rows[rowKey] = mutation } - mutation.Set(columnFamily, columnKey, 0, value) -} - -func (b bigtableWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { - // ToDo: implement this to support deleting index entries from Bigtable - panic("Bigtable does not support Deleting index entries yet") + callback(mutation, columnKey) } func (s *storageClientColumnKey) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go index 4c3659a429e9..b85012f65688 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go @@ -8,16 +8,16 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" ) -type tableClient struct { +type TableClient struct { directory string } // NewTableClient returns a new TableClient. 
func NewTableClient(directory string) (chunk.TableClient, error) { - return &tableClient{directory: directory}, nil + return &TableClient{directory: directory}, nil } -func (c *tableClient) ListTables(ctx context.Context) ([]string, error) { +func (c *TableClient) ListTables(ctx context.Context) ([]string, error) { boltDbFiles := []string{} err := filepath.Walk(c.directory, func(path string, info os.FileInfo, err error) error { if err != nil { @@ -35,20 +35,20 @@ func (c *tableClient) ListTables(ctx context.Context) ([]string, error) { return boltDbFiles, nil } -func (c *tableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { +func (c *TableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { return nil } -func (c *tableClient) DeleteTable(ctx context.Context, name string) error { +func (c *TableClient) DeleteTable(ctx context.Context, name string) error { return os.Remove(filepath.Join(c.directory, name)) } -func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { +func (c *TableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { return chunk.TableDesc{ Name: name, }, true, nil } -func (c *tableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error { +func (c *TableClient) UpdateTable(ctx context.Context, current, expected chunk.TableDesc) error { return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index a10bd297648a..40b081c78052 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -11,6 +11,7 @@ import ( "time" "github.com/go-kit/kit/log/level" + "github.com/thanos-io/thanos/pkg/runutil" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" @@ -74,10 +75,19 @@ func (f *FSObjectClient) PutObject(ctx context.Context, objectKey string, object return err } - defer fl.Close() + defer runutil.CloseWithLogOnErr(pkgUtil.Logger, fl, "fullPath: %s", fullPath) _, err = io.Copy(fl, object) - return err + if err != nil { + return err + } + + err = fl.Sync() + if err != nil { + return err + } + + return fl.Close() } // List only objects from the store non-recursively diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/opts.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/opts.go new file mode 100644 index 000000000000..10d07b012f75 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/opts.go @@ -0,0 +1,60 @@ +package chunk + +import ( + "strings" + "unicode/utf8" +) + +// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped. +var regexMetaCharacterBytes [16]byte + +// isRegexMetaCharacter reports whether byte b needs to be escaped. +func isRegexMetaCharacter(b byte) bool { + return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0 +} + +func init() { + for _, b := range []byte(`.+*?()|[]{}^$`) { + regexMetaCharacterBytes[b%16] |= 1 << (b / 16) + } +} + +// copied from Prometheus querier.go, removed check for Prometheus wrapper. +// Returns the list of values that the regex can match.
+func findSetMatches(pattern string) []string { + escaped := false + sets := []*strings.Builder{{}} + for i := 0; i < len(pattern); i++ { + if escaped { + switch { + case isRegexMetaCharacter(pattern[i]): + sets[len(sets)-1].WriteByte(pattern[i]) + case pattern[i] == '\\': + sets[len(sets)-1].WriteByte('\\') + default: + return nil + } + escaped = false + } else { + switch { + case isRegexMetaCharacter(pattern[i]): + if pattern[i] == '|' { + sets = append(sets, &strings.Builder{}) + } else { + return nil + } + case pattern[i] == '\\': + escaped = true + default: + sets[len(sets)-1].WriteByte(pattern[i]) + } + } + } + matches := make([]string, 0, len(sets)) + for _, s := range sets { + if s.Len() > 0 { + matches = append(matches, s.String()) + } + } + return matches +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go index 30a4035e3c2e..0ec5bd35aac5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go @@ -18,12 +18,12 @@ type DeleteRequestHandler struct { } // NewDeleteRequestHandler creates a DeleteRequestHandler -func NewDeleteRequestHandler(deleteStore *DeleteStore) (*DeleteRequestHandler, error) { +func NewDeleteRequestHandler(deleteStore *DeleteStore) *DeleteRequestHandler { deleteMgr := DeleteRequestHandler{ deleteStore: deleteStore, } - return &deleteMgr, nil + return &deleteMgr } // AddDeleteRequestHandler handles addition of new delete request diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index c7af79a2763c..e6a8c68d0cca 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -40,22 +40,33 @@ var ( type hasChunksForIntervalFunc func(userID, seriesID string, from, through model.Time) (bool, error) -// Schema interface defines methods to calculate the hash and range keys needed +// Schema interfaces define methods to calculate the hash and range keys needed // to write or read chunks from the external index. -type Schema interface { - // When doing a write, use this method to return the list of entries you should write to. - GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - - // Should only be used with the seriesStore. TODO: Make seriesStore implement a different interface altogether. 
- // returns cache key string and []IndexEntry per bucket, matched in order - GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) - GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) +// BaseSchema has operations shared between StoreSchema and SeriesStoreSchema +type BaseSchema interface { // When doing a read, use these methods to return the list of entries you should query GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery +} + +// StoreSchema is a schema used by store +type StoreSchema interface { + BaseSchema + + // When doing a write, use this method to return the list of entries you should write to. + GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) +} + +// SeriesStoreSchema is a schema used by seriesStore +type SeriesStoreSchema interface { + BaseSchema + + // returns cache key string and []IndexEntry per bucket, matched in order + GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) + GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) // If the query resulted in series IDs, use this method to find chunks.
GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) @@ -102,13 +113,39 @@ type IndexEntry struct { type schemaBucketsFunc func(from, through model.Time, userID string) []Bucket -// schema implements Schema given a bucketing function and and set of range key callbacks -type schema struct { +// baseSchema implements BaseSchema given a bucketing function and a set of range key callbacks +type baseSchema struct { buckets schemaBucketsFunc - entries entries + entries baseEntries +} + +// storeSchema implements StoreSchema given a bucketing function and a set of range key callbacks +type storeSchema struct { + baseSchema + entries storeEntries +} + +// seriesStoreSchema implements SeriesStoreSchema given a bucketing function and a set of range key callbacks +type seriesStoreSchema struct { + baseSchema + entries seriesStoreEntries +} + +func newStoreSchema(buckets schemaBucketsFunc, entries storeEntries) storeSchema { + return storeSchema{ + baseSchema: baseSchema{buckets: buckets, entries: entries}, + entries: entries, + } } -func (s schema) GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { +func newSeriesStoreSchema(buckets schemaBucketsFunc, entries seriesStoreEntries) seriesStoreSchema { + return seriesStoreSchema{ + baseSchema: baseSchema{buckets: buckets, entries: entries}, + entries: entries, + } +} + +func (s storeSchema) GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { var result []IndexEntry for _, bucket := range s.buckets(from, through, userID) { @@ -122,7 +159,7 @@ func (s schema) GetWriteEntries(from, through model.Time, userID string, metricN } // returns cache key string and []IndexEntry per bucket, matched in order -func (s schema) GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) { +func (s seriesStoreSchema) GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]IndexEntry, error) { var keys []string var indexEntries [][]IndexEntry @@ -148,7 +185,7 @@ func (s schema) GetCacheKeysAndLabelWriteEntries(from, through model.Time, userI return keys, indexEntries, nil } -func (s schema) GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { +func (s seriesStoreSchema) GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { var result []IndexEntry for _, bucket := range s.buckets(from, through, userID) { @@ -162,7 +199,7 @@ func (s schema) GetChunkWriteEntries(from, through model.Time, userID string, me } -func (s schema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { +func (s baseSchema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -176,7 +213,7 @@ func (s schema) GetReadQueriesForMetric(from, through model.Time, userID string, return result, nil } -func (s schema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) {
+func (s baseSchema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -190,7 +227,7 @@ func (s schema) GetReadQueriesForMetricLabel(from, through model.Time, userID st return result, nil } -func (s schema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { +func (s baseSchema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -204,7 +241,7 @@ func (s schema) GetReadQueriesForMetricLabelValue(from, through model.Time, user return result, nil } -func (s schema) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { +func (s seriesStoreSchema) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -222,7 +259,7 @@ func (s schema) GetChunksForSeries(from, through model.Time, userID string, seri // Since SeriesIDs are created per bucket, it makes sure that we don't include series entries which are in use by verifying using hasChunksForIntervalFunc i.e // It checks first and last buckets covered by the time interval to see if a SeriesID still has chunks in the store, // if yes then it doesn't include IndexEntry's for that bucket for deletion. -func (s schema) GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error) { +func (s seriesStoreSchema) GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error) { metricName := metric.Get(model.MetricNameLabel) if metricName == "" { return nil, ErrMetricNameLabelMissing @@ -290,7 +327,7 @@ func (s schema) GetSeriesDeleteEntries(from, through model.Time, userID string, return result, nil } -func (s schema) GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { +func (s seriesStoreSchema) GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -304,21 +341,33 @@ func (s schema) GetLabelNamesForSeries(from, through model.Time, userID string, return result, nil } -func (s schema) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { +func (s baseSchema) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { return s.entries.FilterReadQueries(queries, shard) } -type entries interface { +type baseEntries interface { + GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) + GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) + GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) + FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery +} + +// used by storeSchema +type storeEntries interface { + baseEntries + GetWriteEntries(bucket Bucket, metricName string, labels 
labels.Labels, chunkID string) ([]IndexEntry, error) +} + +// used by seriesStoreSchema +type seriesStoreEntries interface { + baseEntries + GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) - GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) - GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) GetLabelNamesForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) - FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery } // original entries: @@ -346,13 +395,6 @@ func (originalEntries) GetWriteEntries(bucket Bucket, metricName string, labels return result, nil } -func (originalEntries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} -func (originalEntries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (originalEntries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { @@ -386,14 +428,6 @@ func (originalEntries) GetReadMetricLabelValueQueries(bucket Bucket, metricName }, nil } -func (originalEntries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - -func (originalEntries) GetLabelNamesForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - func (originalEntries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { return queries } @@ -423,13 +457,6 @@ func (base64Entries) GetWriteEntries(bucket Bucket, metricName string, labels la return result, nil } -func (base64Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} -func (base64Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (base64Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { encodedBytes := encodeBase64Value(labelValue) return []IndexQuery{ @@ -473,13 +500,6 @@ func (labelNameInHashKeyEntries) GetWriteEntries(bucket Bucket, metricName strin return entries, nil } -func (labelNameInHashKeyEntries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} -func (labelNameInHashKeyEntries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (labelNameInHashKeyEntries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { @@ -509,14 +529,6 @@ func (labelNameInHashKeyEntries) GetReadMetricLabelValueQueries(bucket Bucket, m }, nil } -func (labelNameInHashKeyEntries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return 
nil, ErrNotSupported -} - -func (labelNameInHashKeyEntries) GetLabelNamesForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - func (labelNameInHashKeyEntries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { return queries } @@ -553,13 +565,6 @@ func (v5Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels return entries, nil } -func (v5Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} -func (v5Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (v5Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { @@ -587,14 +592,6 @@ func (v5Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string }, nil } -func (v5Entries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - -func (v5Entries) GetLabelNamesForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - func (v5Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { return queries } @@ -630,13 +627,6 @@ func (v6Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels return entries, nil } -func (v6Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} -func (v6Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (v6Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { encodedFromBytes := encodeTime(bucket.from) return []IndexQuery{ @@ -671,14 +661,6 @@ func (v6Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string }, nil } -func (v6Entries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - -func (v6Entries) GetLabelNamesForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { - return nil, ErrNotSupported -} - func (v6Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery { return queries } @@ -686,10 +668,6 @@ func (v6Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardA // v9Entries adds a layer of indirection between labels -> series -> chunks. 
type v9Entries struct{} -func (v9Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (v9Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := labelsSeriesID(labels) @@ -790,10 +768,6 @@ type v10Entries struct { rowShards uint32 } -func (v10Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { - return nil, ErrNotSupported -} - func (s v10Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := labelsSeriesID(labels) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go index 73c898626931..467f572f3378 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go @@ -8,13 +8,13 @@ import ( ) type schemaCaching struct { - Schema + SeriesStoreSchema cacheOlderThan time.Duration } func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { - queries, err := s.Schema.GetReadQueriesForMetric(from, through, userID, metricName) + queries, err := s.SeriesStoreSchema.GetReadQueriesForMetric(from, through, userID, metricName) if err != nil { return nil, err } @@ -22,7 +22,7 @@ func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID } func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { - queries, err := s.Schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) + queries, err := s.SeriesStoreSchema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) if err != nil { return nil, err } @@ -30,7 +30,7 @@ func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, u } func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { - queries, err := s.Schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) + queries, err := s.SeriesStoreSchema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) if err != nil { return nil, err } @@ -39,7 +39,7 @@ func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Ti // If the query resulted in series IDs, use this method to find chunks. 
func (s *schemaCaching) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { - queries, err := s.Schema.GetChunksForSeries(from, through, userID, seriesID) + queries, err := s.SeriesStoreSchema.GetChunksForSeries(from, through, userID, seriesID) if err != nil { return nil, err } @@ -47,7 +47,7 @@ func (s *schemaCaching) GetChunksForSeries(from, through model.Time, userID stri } func (s *schemaCaching) GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { - queries, err := s.Schema.GetLabelNamesForSeries(from, through, userID, seriesID) + queries, err := s.SeriesStoreSchema.GetLabelNamesForSeries(from, through, userID, seriesID) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 28bf1b8b8275..617cdbd577ab 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -145,41 +145,47 @@ func (cfg *SchemaConfig) ForEachAfter(t model.Time, f func(config *PeriodConfig) } // CreateSchema returns the schema defined by the PeriodConfig -func (cfg PeriodConfig) CreateSchema() Schema { +func (cfg PeriodConfig) CreateSchema() (BaseSchema, error) { + buckets, bucketsPeriod := cfg.createBucketsFunc() + + // Ensure the tables period is a multiple of the bucket period + if cfg.IndexTables.Period > 0 && cfg.IndexTables.Period%bucketsPeriod != 0 { + return nil, errInvalidTablePeriod + } + + if cfg.ChunkTables.Period > 0 && cfg.ChunkTables.Period%bucketsPeriod != 0 { + return nil, errInvalidTablePeriod + } - var e entries switch cfg.Schema { case "v1": - e = originalEntries{} + return newStoreSchema(buckets, originalEntries{}), nil case "v2": - e = originalEntries{} + return newStoreSchema(buckets, originalEntries{}), nil case "v3": - e = base64Entries{originalEntries{}} + return newStoreSchema(buckets, base64Entries{originalEntries{}}), nil case "v4": - e = labelNameInHashKeyEntries{} + return newStoreSchema(buckets, labelNameInHashKeyEntries{}), nil case "v5": - e = v5Entries{} + return newStoreSchema(buckets, v5Entries{}), nil case "v6": - e = v6Entries{} + return newStoreSchema(buckets, v6Entries{}), nil case "v9": - e = v9Entries{} - case "v10": - e = v10Entries{ - rowShards: cfg.RowShards, + return newSeriesStoreSchema(buckets, v9Entries{}), nil + case "v10", "v11": + if cfg.RowShards == 0 { + return nil, fmt.Errorf("Must have row_shards > 0 (current: %d) for schema (%s)", cfg.RowShards, cfg.Schema) } - case "v11": - e = v11Entries{ - v10Entries: v10Entries{ - rowShards: cfg.RowShards, - }, + + v10 := v10Entries{rowShards: cfg.RowShards} + if cfg.Schema == "v10" { + return newSeriesStoreSchema(buckets, v10), nil } + + return newSeriesStoreSchema(buckets, v11Entries{v10}), nil default: - return nil + return nil, errInvalidSchemaVersion } - - buckets, _ := cfg.createBucketsFunc() - - return schema{buckets, e} } func (cfg PeriodConfig) createBucketsFunc() (schemaBucketsFunc, time.Duration) { @@ -199,35 +205,8 @@ func (cfg *PeriodConfig) applyDefaults() { // Validate the period config. 
func (cfg PeriodConfig) validate() error { - // Ensure the schema version exists - schema := cfg.CreateSchema() - if schema == nil { - return errInvalidSchemaVersion - } - - // Ensure the tables period is a multiple of the bucket period - _, bucketsPeriod := cfg.createBucketsFunc() - - if cfg.IndexTables.Period > 0 && cfg.IndexTables.Period%bucketsPeriod != 0 { - return errInvalidTablePeriod - } - - if cfg.ChunkTables.Period > 0 && cfg.ChunkTables.Period%bucketsPeriod != 0 { - return errInvalidTablePeriod - } - - switch cfg.Schema { - case "v1", "v2", "v3", "v4", "v5", "v6", "v9": - case "v10", "v11": - if cfg.RowShards == 0 { - return fmt.Errorf("Must have row_shards > 0 (current: %d) for schema (%s)", cfg.RowShards, cfg.Schema) - } - default: - // This generally unreachable path protects us from adding schemas and not handling them in this function. - return fmt.Errorf("unexpected schema (%s)", cfg.Schema) - } - - return nil + _, err := cfg.CreateSchema() + return err } // Load the yaml file, or build the config from legacy command-line flags diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index 7d99e0349c5c..3c94143c073c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "time" "github.com/go-kit/kit/log/level" jsoniter "github.com/json-iterator/go" @@ -64,32 +65,27 @@ var ( // seriesStore implements Store type seriesStore struct { - store + baseStore + schema SeriesStoreSchema writeDedupeCache cache.Cache } -func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) (Store, error) { - fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks) +func newSeriesStore(cfg StoreConfig, schema SeriesStoreSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) (Store, error) { + rs, err := newBaseStore(cfg, schema, index, chunks, limits, chunksCache) if err != nil { return nil, err } if cfg.CacheLookupsOlderThan != 0 { schema = &schemaCaching{ - Schema: schema, - cacheOlderThan: cfg.CacheLookupsOlderThan, + SeriesStoreSchema: schema, + cacheOlderThan: time.Duration(cfg.CacheLookupsOlderThan), } } return &seriesStore{ - store: store{ - cfg: cfg, - index: index, - chunks: chunks, - schema: schema, - limits: limits, - Fetcher: fetcher, - }, + baseStore: rs, + schema: schema, writeDedupeCache: writeDedupeCache, }, nil } @@ -183,7 +179,7 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, userID string, from, thr level.Debug(log).Log("chunks-post-filtering", len(chunks)) chunksPerQuery.Observe(float64(len(chunks))) - return [][]Chunk{chunks}, []*Fetcher{c.store.Fetcher}, nil + return [][]Chunk{chunks}, []*Fetcher{c.baseStore.Fetcher}, nil } // LabelNamesForMetricName retrieves all label names for a metric name. 
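A note on the CreateSchema refactor above: PeriodConfig.CreateSchema is now the single place that knows which schema versions exist and what they require, so validate() simply calls it and returns its error. Callers that need a concrete schema must assert on the returned BaseSchema. The following is a minimal, hypothetical sketch of that pattern; pickStore is not part of this diff, and it assumes the BaseSchema/StoreSchema/SeriesStoreSchema interfaces the diff introduces:

package chunk

import "fmt"

// pickStore illustrates consuming the new CreateSchema signature: the error
// now carries what validate() used to check (unknown versions, bad table
// periods, missing row_shards), and the schema's concrete capability is
// discovered with a type switch.
func pickStore(cfg PeriodConfig) (string, error) {
	schema, err := cfg.CreateSchema()
	if err != nil {
		return "", err // errInvalidSchemaVersion, errInvalidTablePeriod, ...
	}
	switch schema.(type) {
	case SeriesStoreSchema: // v9, v10, v11: labels -> series -> chunks
		return "seriesStore", nil
	case StoreSchema: // v1 through v6: labels -> chunks
		return "store", nil
	default:
		return "", fmt.Errorf("unexpected schema type %T", schema)
	}
}

Folding the checks into construction removes the risk of the validation switch and the construction switch drifting apart, which the old code guarded against with a "generally unreachable" default case.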
@@ -338,47 +334,9 @@ func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from } func (c *seriesStore) lookupSeriesByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, shard *astmapper.ShardAnnotation) ([]string, error) { - log, ctx := spanlogger.New(ctx, "SeriesStore.lookupSeriesByMetricNameMatcher", "metricName", metricName, "matcher", matcher) - defer log.Span.Finish() - - var err error - var queries []IndexQuery - var labelName string - if matcher == nil { - queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName) - } else if matcher.Type != labels.MatchEqual { - labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name) - } else { - labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value) - } - if err != nil { - return nil, err - } - level.Debug(log).Log("queries", len(queries)) - - queries = c.schema.FilterReadQueries(queries, shard) - - level.Debug(log).Log("filteredQueries", len(queries)) - - entries, err := c.lookupEntriesByQueries(ctx, queries) - if e, ok := err.(CardinalityExceededError); ok { - e.MetricName = metricName - e.LabelName = labelName - return nil, e - } else if err != nil { - return nil, err - } - level.Debug(log).Log("entries", len(entries)) - - ids, err := c.parseIndexEntries(ctx, entries, matcher) - if err != nil { - return nil, err - } - level.Debug(log).Log("ids", len(ids)) - - return ids, nil + return c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, func(queries []IndexQuery) []IndexQuery { + return c.schema.FilterReadQueries(queries, shard) + }) } func (c *seriesStore) lookupChunksBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go index bf08a64ab8f7..fc94907a1447 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go @@ -25,8 +25,8 @@ func (f fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient, } indexClient, chunkClient, tableClient, schemaConfig, err := f.fixture.Clients() indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ - Size: 500, - Validity: 5 * time.Minute, + MaxSizeItems: 500, + Validity: 5 * time.Minute, }), 5*time.Minute, limits) return indexClient, chunkClient, tableClient, schemaConfig, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index f1863653b792..7af46eea056d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -63,6 +63,7 @@ func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity tim func (s *cachingIndexClient) Stop() { s.cache.Stop() + s.IndexClient.Stop() } func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) (shouldContinue bool)) error { diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index ef74d02ebf9d..436f6e92ba43 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -28,17 +28,23 @@ const ( StorageEngineTSDB = "tsdb" ) +type indexStoreFactories struct { + indexClientFactoryFunc IndexClientFactoryFunc + tableClientFactoryFunc TableClientFactoryFunc +} + +// IndexClientFactoryFunc defines the signature of a function which creates a chunk.IndexClient for managing the index in an index store type IndexClientFactoryFunc func() (chunk.IndexClient, error) -var customIndexClients = map[string]IndexClientFactoryFunc{} +// TableClientFactoryFunc defines the signature of a function which creates a chunk.TableClient for managing tables in an index store +type TableClientFactoryFunc func() (chunk.TableClient, error) -func RegisterIndexClient(name string, factory IndexClientFactoryFunc) { - customIndexClients[name] = factory -} +var customIndexStores = map[string]indexStoreFactories{} -// useful for cleaning up state after tests -func unregisterAllCustomIndexClients() { - customIndexClients = map[string]IndexClientFactoryFunc{} +// RegisterIndexStore is used for registering a custom index type. +// When an index type is registered here with the same name as an existing type, the registered one takes precedence. +func RegisterIndexStore(name string, indexClientFactory IndexClientFactoryFunc, tableClientFactory TableClientFactoryFunc) { + customIndexStores[name] = indexStoreFactories{indexClientFactory, tableClientFactory} } // StoreLimits helps get Limits specific to Queries for Stores @@ -151,8 +157,10 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf // NewIndexClient makes a new index client of the desired type. func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { - if factory, ok := customIndexClients[name]; ok { - return factory() + if indexClientFactory, ok := customIndexStores[name]; ok { + if indexClientFactory.indexClientFactoryFunc != nil { + return indexClientFactory.indexClientFactoryFunc() + } } switch name { @@ -230,6 +238,12 @@ func newChunkClientFromStore(store chunk.ObjectClient, err error) (chunk.Client, // NewTableClient makes a new table client based on the configuration. func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { + if indexClientFactory, ok := customIndexStores[name]; ok { + if indexClientFactory.tableClientFactoryFunc != nil { + return indexClientFactory.tableClientFactoryFunc() + } + } + switch name { case "inmemory": return chunk.NewMockStorage(), nil @@ -265,11 +279,17 @@ func NewBucketClient(storageConfig Config) (chunk.BucketClient, error) { // NewObjectClient makes a new StorageClient of the desired types.
func NewObjectClient(name string, cfg Config) (chunk.ObjectClient, error) { switch name { + case "aws", "s3": + return aws.NewS3ObjectClient(cfg.AWSStorageConfig.S3Config, chunk.DirDelim) + case "gcs": + return gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, chunk.DirDelim) + case "azure": + return azure.NewBlobStorage(&cfg.AzureStorageConfig, chunk.DirDelim) case "inmemory": return chunk.NewMockStorage(), nil case "filesystem": return local.NewFSObjectClient(cfg.FSConfig) default: - return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: filesystem", name) + return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, s3, gcs, azure, filesystem", name) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index 0f6051a5a086..546dfa157846 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -17,12 +17,12 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/user" - "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/ingester/client" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/prom1/storage/metric" "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/limiter" @@ -42,11 +42,21 @@ var ( Name: "distributor_received_samples_total", Help: "The total number of received samples, excluding rejected and deduped samples.", }, []string{"user"}) + receivedMetadata = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_received_metadata_total", + Help: "The total number of received metadata, excluding rejected.", + }, []string{"user"}) incomingSamples = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_samples_in_total", Help: "The total number of samples that have come in to the distributor, including rejected or deduped samples.", }, []string{"user"}) + incomingMetadata = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "distributor_metadata_in_total", + Help: "The total number of metadata that have come in to the distributor, including rejected.", + }, []string{"user"}) nonHASamples = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_non_ha_samples_received_total", @@ -67,12 +77,12 @@ var ( Namespace: "cortex", Name: "distributor_ingester_appends_total", Help: "The total number of batch appends sent to ingesters.", - }, []string{"ingester"}) + }, []string{"ingester", "type"}) ingesterAppendFailures = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_ingester_append_failures_total", Help: "The total number of failed batch appends sent to ingesters.", - }, []string{"ingester"}) + }, []string{"ingester", "type"}) ingesterQueries = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "distributor_ingester_queries_total", @@ -88,9 +98,18 @@ var ( Name: "distributor_replication_factor", Help: "The configured replication factor.", }) + latestSeenSampleTimestampPerUser = promauto.NewGaugeVec(prometheus.GaugeOpts{ +
Name: "cortex_distributor_latest_seen_sample_timestamp_seconds", + Help: "Unix timestamp of latest received sample per user.", + }, []string{"user"}) emptyPreallocSeries = ingester_client.PreallocTimeseries{} ) +const ( + typeSamples = "samples" + typeMetadata = "metadata" +) + // Distributor is a storage.SampleAppender and a client.Querier which // forwards appends and queries to individual ingesters. type Distributor struct { @@ -98,7 +117,7 @@ type Distributor struct { cfg Config ingestersRing ring.ReadRing - ingesterPool *ingester_client.Pool + ingesterPool *ring_client.Pool limits *validation.Overrides // The global rate limiter requires a distributors ring to count @@ -106,7 +125,7 @@ type Distributor struct { distributorsRing *ring.Lifecycler // For handling HA replicas. - Replicas *haTracker + HATracker *haTracker // Per-user rate limiter. ingestionRateLimiter *limiter.RateLimiter @@ -119,7 +138,7 @@ type Distributor struct { // Config contains the configuration require to // create a Distributor type Config struct { - PoolConfig ingester_client.PoolConfig `yaml:"pool"` + PoolConfig PoolConfig `yaml:"pool"` HATrackerConfig HATrackerConfig `yaml:"ha_tracker"` @@ -133,7 +152,7 @@ type Config struct { DistributorRing RingConfig `yaml:"ring"` // for testing - ingesterClientFactory client.Factory `yaml:"-"` + ingesterClientFactory ring_client.PoolFactory `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -156,7 +175,7 @@ func (cfg *Config) Validate() error { // New constructs a new Distributor func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, ingestersRing ring.ReadRing, canJoinDistributorsRing bool) (*Distributor, error) { if cfg.ingesterClientFactory == nil { - cfg.ingesterClientFactory = func(addr string) (grpc_health_v1.HealthClient, error) { + cfg.ingesterClientFactory = func(addr string) (ring_client.PoolClient, error) { return ingester_client.MakeIngesterClient(addr, clientConfig) } } @@ -196,11 +215,11 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove d := &Distributor{ cfg: cfg, ingestersRing: ingestersRing, - ingesterPool: ingester_client.NewPool(cfg.PoolConfig, ingestersRing, cfg.ingesterClientFactory, util.Logger), + ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.ingesterClientFactory, util.Logger), distributorsRing: distributorsRing, limits: limits, ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), - Replicas: replicas, + HATracker: replicas, } subservices = append(subservices, d.ingesterPool) @@ -246,17 +265,29 @@ func (d *Distributor) tokenForLabels(userID string, labels []client.LabelAdapter return shardByMetricName(userID, metricName), nil } +func (d *Distributor) tokenForMetadata(userID string, metricName string) uint32 { + if d.cfg.ShardByAllLabels { + return shardByMetricName(userID, metricName) + } + + return shardByUser(userID) +} + func shardByMetricName(userID string, metricName string) uint32 { + h := shardByUser(userID) + h = client.HashAdd32(h, metricName) + return h +} + +func shardByUser(userID string) uint32 { h := client.HashNew32() h = client.HashAdd32(h, userID) - h = client.HashAdd32(h, metricName) return h } // This function generates different values for different order of same labels. 
func shardByAllLabels(userID string, labels []client.LabelAdapter) uint32 { - h := client.HashNew32() - h = client.HashAdd32(h, userID) + h := shardByUser(userID) for _, label := range labels { h = client.HashAdd32(h, label.Name) h = client.HashAdd32(h, label.Value) @@ -287,7 +318,7 @@ func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica // At this point we know we have both HA labels, we should lookup // the cluster/instance here to see if we want to accept this sample. - err := d.Replicas.checkReplica(ctx, userID, cluster, replica) + err := d.HATracker.checkReplica(ctx, userID, cluster, replica) // checkReplica should only have returned an error if there was a real error talking to Consul, or if the replica labels don't match. if err != nil { // Don't accept the sample. return false, err @@ -329,7 +360,7 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie return nil, err } - var lastPartialErr error + var firstPartialErr error removeReplica := false numSamples := 0 @@ -338,6 +369,17 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie } // Count the total samples in, prior to validation or deduplication, for comparison with other metrics. incomingSamples.WithLabelValues(userID).Add(float64(numSamples)) + // Count the total number of metadata in. + incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Metadata))) + + // A WriteRequest can only contain series or metadata but not both. This might change in the future. + // For each timeseries or metadata entry, we compute a hash to distribute across ingesters; + // check each sample/metadata and discard if outside limits. + validatedTimeseries := make([]client.PreallocTimeseries, 0, len(req.Timeseries)) + validatedMetadata := make([]*client.MetricMetadata, 0, len(req.Metadata)) + metadataKeys := make([]uint32, 0, len(req.Metadata)) + seriesKeys := make([]uint32, 0, len(req.Timeseries)) + validatedSamples := 0 if d.limits.AcceptHASamples(userID) && len(req.Timeseries) > 0 { cluster, replica := findHALabels(d.limits.HAReplicaLabel(userID), d.limits.HAClusterLabel(userID), req.Timeseries[0].Labels) @@ -359,12 +401,22 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie } } + latestSampleTimestampMs := int64(0) + defer func() { + // Update this metric even in case of errors. + if latestSampleTimestampMs > 0 { + latestSeenSampleTimestampPerUser.WithLabelValues(userID).Set(float64(latestSampleTimestampMs) / 1000) + } + }() + // For each timeseries, compute a hash to distribute across ingesters; // check each sample and discard if outside limits. - validatedTimeseries := make([]client.PreallocTimeseries, 0, len(req.Timeseries)) - keys := make([]uint32, 0, len(req.Timeseries)) - validatedSamples := 0 for _, ts := range req.Timeseries { + // Use the timestamp of the latest sample in the series. If samples for the series are not ordered, the metric for the user may be wrong. + if len(ts.Samples) > 0 { + latestSampleTimestampMs = util.Max64(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) + } + // If we found both the cluster and replica labels, we only want to include the cluster label when // storing series in Cortex. If we kept the replica label we would end up with another series for the same // series we're trying to dedupe when HA tracking moves over to a different replica.
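An aside on the latestSampleTimestampMs bookkeeping added above: Push accumulates the largest sample timestamp while it validates the request, and publishes it from a defer so that early error returns (validation failures, rate limiting) still update the per-user gauge. Below is a condensed, stand-alone sketch of the same pattern; trackLatestSample is a hypothetical name, while the gauge and util.Max64 are the ones used in this diff:

package distributor

import (
	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/cortexproject/cortex/pkg/util"
)

// trackLatestSample mirrors what Push now does: remember the max sample
// timestamp seen, and set the gauge in a defer so the metric is recorded
// even when the caller bails out early.
func trackLatestSample(userID string, timeseries []client.PreallocTimeseries) {
	latestMs := int64(0)
	defer func() {
		if latestMs > 0 {
			// Samples carry milliseconds; the gauge is expressed in seconds.
			latestSeenSampleTimestampPerUser.WithLabelValues(userID).Set(float64(latestMs) / 1000)
		}
	}()
	for _, ts := range timeseries {
		if len(ts.Samples) > 0 {
			// Assumes samples within a series are ordered by time,
			// so the last one is the newest.
			latestMs = util.Max64(latestMs, ts.Samples[len(ts.Samples)-1].TimestampMs)
		}
	}
}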
@@ -398,8 +450,8 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie // Errors in validation are considered non-fatal, as one series in a request may contain // invalid data but all the remaining series could be perfectly valid. - if err != nil { - lastPartialErr = err + if err != nil && firstPartialErr == nil { + firstPartialErr = err } // validateSeries would have returned an emptyPreallocSeries if there were no valid samples. @@ -407,28 +459,47 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie continue } - keys = append(keys, key) + seriesKeys = append(seriesKeys, key) validatedTimeseries = append(validatedTimeseries, validatedSeries) validatedSamples += len(ts.Samples) } + + for _, m := range req.Metadata { + err := validation.ValidateMetadata(d.limits, userID, m) + + if err != nil { + if firstPartialErr == nil { + firstPartialErr = err + } + + continue + } + + metadataKeys = append(metadataKeys, d.tokenForMetadata(userID, m.MetricName)) + validatedMetadata = append(validatedMetadata, m) + } + receivedSamples.WithLabelValues(userID).Add(float64(validatedSamples)) + receivedMetadata.WithLabelValues(userID).Add(float64(len(validatedMetadata))) - if len(keys) == 0 { - // Ensure the request slice is reused if there's no series passing the validation. + if len(seriesKeys) == 0 && len(metadataKeys) == 0 { + // Ensure the request slice is reused if there's no series or metadata passing the validation. client.ReuseSlice(req.Timeseries) - return &client.WriteResponse{}, lastPartialErr + return &client.WriteResponse{}, firstPartialErr } now := time.Now() - if !d.ingestionRateLimiter.AllowN(now, userID, validatedSamples) { + totalN := validatedSamples + len(validatedMetadata) + if !d.ingestionRateLimiter.AllowN(now, userID, totalN) { // Ensure the request slice is reused if the request is rate limited. client.ReuseSlice(req.Timeseries) // Return a 4xx here to have the client discard the data and not retry. If a client // is sending too much data consistently we will unlikely ever catch up otherwise. validation.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamples)) - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples", d.ingestionRateLimiter.Limit(now, userID), numSamples) + validation.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(len(validatedMetadata))) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), validatedSamples, len(validatedMetadata)) } var subRing ring.ReadRing @@ -443,10 +514,19 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie } } + keys := append(seriesKeys, metadataKeys...) 
+ initialMetadataIndex := len(seriesKeys) + err = ring.DoBatch(ctx, subRing, keys, func(ingester ring.IngesterDesc, indexes []int) error { timeseries := make([]client.PreallocTimeseries, 0, len(indexes)) + var metadata []*client.MetricMetadata + for _, i := range indexes { - timeseries = append(timeseries, validatedTimeseries[i]) + if i >= initialMetadataIndex { + metadata = append(metadata, validatedMetadata[i-initialMetadataIndex]) + } else { + timeseries = append(timeseries, validatedTimeseries[i]) + } } // Use a background context to make sure all ingesters get samples even if we return early @@ -456,12 +536,12 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie if sp := opentracing.SpanFromContext(ctx); sp != nil { localCtx = opentracing.ContextWithSpan(localCtx, sp) } - return d.sendSamples(localCtx, ingester, timeseries) + return d.send(localCtx, ingester, timeseries, metadata, req.Source) }, func() { client.ReuseSlice(req.Timeseries) }) if err != nil { return nil, err } - return &client.WriteResponse{}, lastPartialErr + return &client.WriteResponse{}, firstPartialErr } func sortLabelsIfNeeded(labels []client.LabelAdapter) { @@ -486,7 +566,7 @@ func sortLabelsIfNeeded(labels []client.LabelAdapter) { }) } -func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDesc, timeseries []client.PreallocTimeseries) error { +func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, timeseries []client.PreallocTimeseries, metadata []*client.MetricMetadata, source client.WriteRequest_SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -495,13 +575,24 @@ func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDes req := client.WriteRequest{ Timeseries: timeseries, + Metadata: metadata, + Source: source, } _, err = c.Push(ctx, &req) - ingesterAppends.WithLabelValues(ingester.Addr).Inc() - if err != nil { - ingesterAppendFailures.WithLabelValues(ingester.Addr).Inc() + if len(metadata) > 0 { + ingesterAppends.WithLabelValues(ingester.Addr, typeMetadata).Inc() + if err != nil { + ingesterAppendFailures.WithLabelValues(ingester.Addr, typeMetadata).Inc() + } + } + if len(timeseries) > 0 { + ingesterAppends.WithLabelValues(ingester.Addr, typeSamples).Inc() + if err != nil { + ingesterAppendFailures.WithLabelValues(ingester.Addr, typeSamples).Inc() + } } + return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go new file mode 100644 index 000000000000..e8b6e03a4092 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ingester_client_pool.go @@ -0,0 +1,42 @@ +package distributor + +import ( + "flag" + "time" + + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/cortexproject/cortex/pkg/ring" + ring_client "github.com/cortexproject/cortex/pkg/ring/client" +) + +var clients = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "distributor_ingester_clients", + Help: "The current number of ingester clients.", +}) + +// PoolConfig is config for creating a Pool. 
+type PoolConfig struct { + ClientCleanupPeriod time.Duration `yaml:"client_cleanup_period"` + HealthCheckIngesters bool `yaml:"health_check_ingesters"` + RemoteTimeout time.Duration `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet. +func (cfg *PoolConfig) RegisterFlags(f *flag.FlagSet) { + f.DurationVar(&cfg.ClientCleanupPeriod, "distributor.client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.") + f.BoolVar(&cfg.HealthCheckIngesters, "distributor.health-check-ingesters", true, "Run a health check on each ingester client during periodic cleanup.") +} + +func NewPool(cfg PoolConfig, ring ring.ReadRing, factory ring_client.PoolFactory, logger log.Logger) *ring_client.Pool { + poolCfg := ring_client.PoolConfig{ + CheckInterval: cfg.ClientCleanupPeriod, + HealthCheckEnabled: cfg.HealthCheckIngesters, + HealthCheckTimeout: cfg.RemoteTimeout, + } + + return ring_client.NewPool("ingester", poolCfg, ring, factory, clients, logger) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 33468f487e0d..c28269c11ba2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -3,17 +3,13 @@ package client import ( "flag" - otgrpc "github.com/opentracing-contrib/go-grpc" - opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/middleware" "google.golang.org/grpc" _ "google.golang.org/grpc/encoding/gzip" // get gzip compressor registered "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/util/grpcclient" - cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" ) var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ @@ -36,22 +32,10 @@ type closableHealthAndIngesterClient struct { conn *grpc.ClientConn } -func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { - return []grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - cortex_middleware.PrometheusGRPCUnaryInstrumentation(ingesterClientRequestDuration), - }, []grpc.StreamClientInterceptor{ - otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), - middleware.StreamClientUserHeaderInterceptor, - cortex_middleware.PrometheusGRPCStreamInstrumentation(ingesterClientRequestDuration), - } -} - // MakeIngesterClient makes a new IngesterClient func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error) { opts := []grpc.DialOption{grpc.WithInsecure()} - opts = append(opts, cfg.GRPCClientConfig.DialOption(instrumentation())...) + opts = append(opts, cfg.GRPCClientConfig.DialOption(grpcclient.Instrument(ingesterClientRequestDuration))...) conn, err := grpc.Dial(addr, opts...) if err != nil { return nil, err @@ -74,5 +58,5 @@ type Config struct { // RegisterFlags registers configuration settings used by the ingester client config. 
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.GRPCClientConfig.RegisterFlags("ingester.client", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", f) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go index 3e308f1ded71..bacf98117fba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go @@ -18,11 +18,12 @@ import ( var json = jsoniter.ConfigCompatibleWithStandardLibrary -// ToWriteRequest converts matched slices of Labels and Samples into a WriteRequest proto. +// ToWriteRequest converts matched slices of Labels, Samples and Metadata into a WriteRequest proto. // It gets timeseries from the pool, so ReuseSlice() should be called when done. -func ToWriteRequest(lbls []labels.Labels, samples []Sample, source WriteRequest_SourceEnum) *WriteRequest { +func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, source WriteRequest_SourceEnum) *WriteRequest { req := &WriteRequest{ Timeseries: slicePool.Get().([]PreallocTimeseries), + Metadata: metadata, Source: source, } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index d87bd4ced457..7441a4a7244b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -80,9 +80,49 @@ func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor_893a47d0a749d749, []int{0, 0} } +type MetricMetadata_MetricType int32 + +const ( + UNKNOWN MetricMetadata_MetricType = 0 + COUNTER MetricMetadata_MetricType = 1 + GAUGE MetricMetadata_MetricType = 2 + HISTOGRAM MetricMetadata_MetricType = 3 + GAUGEHISTOGRAM MetricMetadata_MetricType = 4 + SUMMARY MetricMetadata_MetricType = 5 + INFO MetricMetadata_MetricType = 6 + STATESET MetricMetadata_MetricType = 7 +) + +var MetricMetadata_MetricType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COUNTER", + 2: "GAUGE", + 3: "HISTOGRAM", + 4: "GAUGEHISTOGRAM", + 5: "SUMMARY", + 6: "INFO", + 7: "STATESET", +} + +var MetricMetadata_MetricType_value = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{24, 0} +} + type WriteRequest struct { Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` + Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } @@ -124,6 +164,13 @@ func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { return API } +func (m *WriteRequest) GetMetadata() []*MetricMetadata { + if m != nil { + return m.Metadata + } + return nil +} + type WriteResponse struct { } @@ -1213,6 +1260,73 @@ func (m *LabelMatchers) GetMatchers() []*LabelMatcher { return nil } +type MetricMetadata struct { + Type MetricMetadata_MetricType 
`protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MetricMetadata_MetricType" json:"type,omitempty"` + MetricName string `protobuf:"bytes,2,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } +func (*MetricMetadata) ProtoMessage() {} +func (*MetricMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{24} +} +func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricMetadata.Merge(m, src) +} +func (m *MetricMetadata) XXX_Size() int { + return m.Size() +} +func (m *MetricMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MetricMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo + +func (m *MetricMetadata) GetType() MetricMetadata_MetricType { + if m != nil { + return m.Type + } + return UNKNOWN +} + +func (m *MetricMetadata) GetMetricName() string { + if m != nil { + return m.MetricName + } + return "" +} + +func (m *MetricMetadata) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricMetadata) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + type Metric struct { Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` } @@ -1220,7 +1334,7 @@ type Metric struct { func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{24} + return fileDescriptor_893a47d0a749d749, []int{25} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1258,7 +1372,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{25} + return fileDescriptor_893a47d0a749d749, []int{26} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1318,7 +1432,7 @@ type TimeSeriesFile struct { func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } func (*TimeSeriesFile) ProtoMessage() {} func (*TimeSeriesFile) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{26} + return fileDescriptor_893a47d0a749d749, []int{27} } func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1381,7 +1495,7 @@ type TransferTSDBResponse struct { func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } func (*TransferTSDBResponse) ProtoMessage() {} func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{27} + return fileDescriptor_893a47d0a749d749, []int{28} } func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1413,6 +1527,7 @@ var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo func init() { 
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) + proto.RegisterEnum("cortex.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest") proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse") proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest") @@ -1437,6 +1552,7 @@ func init() { proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair") proto.RegisterType((*Sample)(nil), "cortex.Sample") proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") + proto.RegisterType((*MetricMetadata)(nil), "cortex.MetricMetadata") proto.RegisterType((*Metric)(nil), "cortex.Metric") proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile") @@ -1446,87 +1562,98 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xdf, 0x89, 0x3f, 0x12, 0x3f, 0x6f, 0x5c, 0x67, 0x92, 0xb6, 0xe9, 0x16, 0x36, 0x65, 0xa4, - 0x16, 0x0b, 0xa8, 0x5b, 0x52, 0x15, 0x7a, 0x00, 0x55, 0x4e, 0xeb, 0xb6, 0x46, 0x49, 0x9a, 0xae, - 0x5d, 0x40, 0x48, 0xc8, 0xda, 0xd8, 0x93, 0x64, 0xc5, 0x7e, 0xb8, 0x3b, 0xb3, 0x88, 0x1e, 0x90, - 0x90, 0xf8, 0x03, 0xe8, 0x91, 0x3f, 0x81, 0x33, 0x17, 0x38, 0x73, 0xea, 0xb1, 0xc7, 0x8a, 0x43, - 0x45, 0x9d, 0x0b, 0xc7, 0xfe, 0x09, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xae, 0x2d, 0xca, 0x47, 0x6f, - 0x9e, 0xf7, 0x7e, 0xef, 0xb7, 0xef, 0x73, 0xe6, 0x19, 0xf4, 0x41, 0x10, 0x72, 0xfa, 0x4d, 0x73, - 0x14, 0x06, 0x3c, 0xc0, 0x65, 0x79, 0x32, 0x2e, 0x1e, 0x3a, 0xfc, 0x28, 0xda, 0x6f, 0x0e, 0x02, - 0xef, 0xd2, 0x61, 0x70, 0x18, 0x5c, 0x12, 0xea, 0xfd, 0xe8, 0x40, 0x9c, 0xc4, 0x41, 0xfc, 0x92, - 0x66, 0xe4, 0x57, 0x04, 0xfa, 0x67, 0xa1, 0xc3, 0xa9, 0x45, 0x1f, 0x44, 0x94, 0x71, 0xbc, 0x0b, - 0xc0, 0x1d, 0x8f, 0x32, 0x1a, 0x3a, 0x94, 0xad, 0xa3, 0x73, 0x85, 0x46, 0x75, 0x13, 0x37, 0xd5, - 0xa7, 0x7a, 0x8e, 0x47, 0xbb, 0x42, 0xb3, 0x65, 0x3c, 0x7e, 0xb6, 0xa1, 0xfd, 0xfe, 0x6c, 0x03, - 0xef, 0x85, 0xd4, 0x76, 0xdd, 0x60, 0xd0, 0x4b, 0xad, 0xac, 0x0c, 0x03, 0xfe, 0x10, 0xca, 0xdd, - 0x20, 0x0a, 0x07, 0x74, 0x7d, 0xe1, 0x1c, 0x6a, 0xd4, 0x36, 0x37, 0x12, 0xae, 0xec, 0x57, 0x9b, - 0x12, 0xd2, 0xf6, 0x23, 0xcf, 0x2a, 0x33, 0xf1, 0x9b, 0x6c, 0x00, 0x4c, 0xa4, 0x78, 0x11, 0x0a, - 0xad, 0xbd, 0x4e, 0x5d, 0xc3, 0x4b, 0x50, 0xb4, 0xee, 0x6f, 0xb7, 0xeb, 0x88, 0x9c, 0x80, 0x65, - 0xc5, 0xc1, 0x46, 0x81, 0xcf, 0x28, 0xf9, 0x18, 0xaa, 0x16, 0xb5, 0x87, 0x49, 0x24, 0x4d, 0x58, - 0x7c, 0x10, 0x65, 0xc3, 0x58, 0x4b, 0x3e, 0x7d, 0x2f, 0xa2, 0xe1, 0x43, 0x05, 0xb3, 0x12, 0x10, - 0xb9, 0x0e, 0xba, 0x34, 0x97, 0x74, 0xf8, 0x12, 0x2c, 0x86, 0x94, 0x45, 0x2e, 0x4f, 0xec, 0x4f, - 0x4e, 0xd9, 0x4b, 0x9c, 0x95, 0xa0, 0xc8, 0x8f, 0x08, 0xf4, 0x2c, 0x35, 0x7e, 0x0f, 0x30, 0xe3, - 0x76, 0xc8, 0xfb, 0x22, 0x1f, 0xdc, 0xf6, 0x46, 0x7d, 0x2f, 0x26, 0x43, 0x8d, 0x82, 0x55, 0x17, - 0x9a, 0x5e, 0xa2, 0xd8, 0x61, 0xb8, 0x01, 0x75, 0xea, 0x0f, 0xf3, 0xd8, 0x05, 0x81, 0xad, 0x51, - 0x7f, 0x98, 0x45, 0x5e, 0x86, 0x25, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xf5, 0x42, 0x3e, 0xb4, - 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x69, 0xa5, 0x28, 0xd2, 0x81, 0xe5, 0x9c, 0xd3, 0xf8, 0xda, - 0x2b, 0x96, 0xb9, 
0x18, 0x97, 0x39, 0x5b, 0x50, 0xf2, 0x08, 0xc1, 0xaa, 0xe0, 0xea, 0xf2, 0x90, - 0xda, 0x5e, 0xca, 0x78, 0x1d, 0xaa, 0x83, 0xa3, 0xc8, 0xff, 0x2a, 0x47, 0x79, 0xfa, 0x65, 0xca, - 0x1b, 0x31, 0x48, 0xf1, 0x66, 0x2d, 0xa6, 0x5c, 0x5a, 0xf8, 0x07, 0x2e, 0x5d, 0x01, 0x2c, 0xe2, - 0xfe, 0xd4, 0x76, 0x23, 0xca, 0x92, 0xec, 0xbf, 0x09, 0xe0, 0xc6, 0xd2, 0xbe, 0x6f, 0x7b, 0x54, - 0x64, 0xbd, 0x62, 0x55, 0x84, 0x64, 0xd7, 0xf6, 0x28, 0xb9, 0x06, 0xab, 0x39, 0x23, 0x15, 0xc6, - 0x5b, 0xa0, 0x4b, 0xab, 0xaf, 0x85, 0x5c, 0xc4, 0x51, 0xb1, 0xaa, 0xee, 0x04, 0x4a, 0x56, 0x61, - 0x65, 0x3b, 0xa1, 0x49, 0xbe, 0x46, 0xae, 0x2a, 0x1f, 0x94, 0x50, 0xb1, 0x6d, 0x40, 0x75, 0xe2, - 0x43, 0x42, 0x06, 0xa9, 0x13, 0x8c, 0x60, 0xa8, 0xdf, 0x67, 0x34, 0xec, 0x72, 0x9b, 0xa7, 0x54, - 0xbf, 0x20, 0x58, 0xc9, 0x08, 0x15, 0xd5, 0x79, 0xa8, 0x39, 0xfe, 0x21, 0x65, 0xdc, 0x09, 0xfc, - 0x7e, 0x68, 0x73, 0x19, 0x12, 0xb2, 0x96, 0x53, 0xa9, 0x65, 0x73, 0x1a, 0x47, 0xed, 0x47, 0x5e, - 0x3f, 0xcd, 0x22, 0x6a, 0x14, 0xad, 0x8a, 0x1f, 0x79, 0x32, 0x79, 0x71, 0x4b, 0xda, 0x23, 0xa7, - 0x3f, 0xc5, 0x54, 0x10, 0x4c, 0x75, 0x7b, 0xe4, 0x74, 0x72, 0x64, 0x4d, 0x58, 0x0d, 0x23, 0x97, - 0x4e, 0xc3, 0x8b, 0x02, 0xbe, 0x12, 0xab, 0x72, 0x78, 0xf2, 0x25, 0xac, 0xc6, 0x8e, 0x77, 0x6e, - 0xe6, 0x5d, 0x3f, 0x0d, 0x8b, 0x11, 0xa3, 0x61, 0xdf, 0x19, 0xaa, 0x32, 0x94, 0xe3, 0x63, 0x67, - 0x88, 0x2f, 0x42, 0x71, 0x68, 0x73, 0x5b, 0xb8, 0x59, 0xdd, 0x3c, 0x93, 0x14, 0xfb, 0xa5, 0xe0, - 0x2d, 0x01, 0x23, 0xb7, 0x01, 0xc7, 0x2a, 0x96, 0x67, 0x7f, 0x1f, 0x4a, 0x2c, 0x16, 0xa8, 0x96, - 0x3b, 0x9b, 0x65, 0x99, 0xf2, 0xc4, 0x92, 0x48, 0xf2, 0x33, 0x02, 0x73, 0x87, 0xf2, 0xd0, 0x19, - 0xb0, 0x5b, 0x41, 0x98, 0x9d, 0x19, 0xf6, 0xba, 0x67, 0xf7, 0x1a, 0xe8, 0xc9, 0x54, 0xf6, 0x19, - 0xe5, 0x6a, 0x7e, 0x4f, 0xce, 0x9a, 0x5f, 0x66, 0x55, 0x13, 0x68, 0x97, 0x72, 0xd2, 0x81, 0x8d, - 0xb9, 0x3e, 0xab, 0x54, 0x5c, 0x80, 0xb2, 0x27, 0x20, 0x2a, 0x17, 0xb5, 0x84, 0x56, 0x1a, 0x5a, - 0x4a, 0x4b, 0x7e, 0x43, 0x70, 0x62, 0x6a, 0x22, 0xe3, 0x10, 0x0e, 0xc2, 0xc0, 0x53, 0xb5, 0xce, - 0x56, 0xab, 0x16, 0xcb, 0x3b, 0x4a, 0xdc, 0x19, 0x66, 0xcb, 0xb9, 0x90, 0x2b, 0xe7, 0x75, 0x28, - 0x8b, 0xd6, 0x4e, 0x6e, 0xa5, 0x95, 0x5c, 0x54, 0x7b, 0xb6, 0x13, 0x6e, 0xad, 0xa9, 0x67, 0x43, - 0x17, 0xa2, 0xd6, 0xd0, 0x1e, 0x71, 0x1a, 0x5a, 0xca, 0x0c, 0xbf, 0x0b, 0x65, 0x79, 0x23, 0xac, - 0x17, 0x05, 0xc1, 0x72, 0x42, 0x90, 0xbd, 0x34, 0x14, 0x84, 0xfc, 0x80, 0xa0, 0x24, 0x5d, 0x7f, - 0x5d, 0xb5, 0x32, 0x60, 0x89, 0xfa, 0x83, 0x60, 0xe8, 0xf8, 0x87, 0x62, 0x44, 0x4a, 0x56, 0x7a, - 0xc6, 0x58, 0xb5, 0x6e, 0x3c, 0x0b, 0xba, 0xea, 0xcf, 0x75, 0x38, 0xd5, 0x0b, 0x6d, 0x9f, 0x1d, - 0xd0, 0x50, 0x38, 0x96, 0x16, 0x86, 0x7c, 0x0b, 0x30, 0xc9, 0x77, 0x26, 0x4f, 0xe8, 0xdf, 0xe5, - 0xa9, 0x09, 0x8b, 0xcc, 0xf6, 0x46, 0x6e, 0x7a, 0x4f, 0xa6, 0x85, 0xee, 0x0a, 0xb1, 0xca, 0x54, - 0x02, 0x22, 0x57, 0xa1, 0x92, 0x52, 0xc7, 0x9e, 0xa7, 0x37, 0xa2, 0x6e, 0x89, 0xdf, 0x78, 0x0d, - 0x4a, 0xe2, 0xbe, 0x13, 0x89, 0xd0, 0x2d, 0x79, 0x20, 0x2d, 0x28, 0x4b, 0xbe, 0x89, 0x5e, 0xde, - 0x39, 0xf2, 0x10, 0xdf, 0x95, 0x33, 0xb2, 0x58, 0xe5, 0x93, 0x14, 0x92, 0x16, 0x2c, 0xe7, 0x5a, - 0x35, 0xf7, 0x76, 0xa1, 0x57, 0x7c, 0xbb, 0xca, 0xb2, 0x7d, 0xff, 0x73, 0xde, 0x48, 0x1f, 0xf4, - 0xec, 0x47, 0xf0, 0x79, 0x28, 0xf2, 0x87, 0x23, 0x19, 0x55, 0x6d, 0x42, 0x27, 0xd4, 0xbd, 0x87, - 0x23, 0x6a, 0x09, 0x75, 0x9a, 0x31, 0xd9, 0xed, 0x53, 0x19, 0x2b, 0x08, 0xa1, 0xca, 0xd8, 0xf7, - 0x08, 0x6a, 0x93, 0x42, 0xdf, 0x72, 0x5c, 0xfa, 0x7f, 0xcc, 0x95, 0x01, 0x4b, 0x07, 0x8e, 0x4b, - 0x85, 0x0f, 0xf2, 0x73, 0xe9, 0x79, 0x66, 
0x1f, 0x9e, 0x82, 0xb5, 0xa4, 0x0f, 0x7b, 0xdd, 0x9b, - 0x5b, 0x49, 0x17, 0xbe, 0xf3, 0x09, 0x54, 0xd2, 0xd0, 0x70, 0x05, 0x4a, 0xed, 0x7b, 0xf7, 0x5b, - 0xdb, 0x75, 0x0d, 0x2f, 0x43, 0x65, 0xf7, 0x6e, 0xaf, 0x2f, 0x8f, 0x08, 0x9f, 0x80, 0xaa, 0xd5, - 0xbe, 0xdd, 0xfe, 0xbc, 0xbf, 0xd3, 0xea, 0xdd, 0xb8, 0x53, 0x5f, 0xc0, 0x18, 0x6a, 0x52, 0xb0, - 0x7b, 0x57, 0xc9, 0x0a, 0x9b, 0xc7, 0x25, 0x58, 0x4a, 0x7c, 0xc7, 0x57, 0xa1, 0xb8, 0x17, 0xb1, - 0x23, 0xbc, 0x36, 0x6b, 0xb9, 0x33, 0x4e, 0x4e, 0x49, 0xd5, 0x4c, 0x68, 0xf8, 0x03, 0x28, 0x89, - 0x4d, 0x02, 0xcf, 0xdc, 0xcc, 0x8c, 0xd9, 0xfb, 0x16, 0xd1, 0xf0, 0x4d, 0xa8, 0x66, 0x36, 0x90, - 0x39, 0xd6, 0x67, 0x73, 0xd2, 0xfc, 0xb2, 0x42, 0xb4, 0xcb, 0x08, 0xdf, 0x81, 0x6a, 0x66, 0x01, - 0xc0, 0x46, 0xae, 0x99, 0x72, 0xab, 0xc4, 0x84, 0x6b, 0xc6, 0xc6, 0x40, 0x34, 0xdc, 0x06, 0x98, - 0xbc, 0xfd, 0xf8, 0x4c, 0x0e, 0x9c, 0x5d, 0x12, 0x0c, 0x63, 0x96, 0x2a, 0xa5, 0xd9, 0x82, 0x4a, - 0xfa, 0xf2, 0xe1, 0xf5, 0x19, 0x8f, 0xa1, 0x24, 0x99, 0xff, 0x4c, 0x12, 0x0d, 0xdf, 0x02, 0xbd, - 0xe5, 0xba, 0xaf, 0x42, 0x63, 0x64, 0x35, 0x6c, 0x9a, 0xc7, 0x85, 0xd3, 0x73, 0x1e, 0x1b, 0x7c, - 0x21, 0xff, 0xa8, 0xcc, 0x7b, 0x41, 0x8d, 0xb7, 0xff, 0x16, 0x97, 0x7e, 0x6d, 0x07, 0x6a, 0xf9, - 0x8b, 0x13, 0xcf, 0x5b, 0x1c, 0x0d, 0x33, 0x55, 0xcc, 0xbe, 0x69, 0xb5, 0x46, 0x5c, 0x59, 0x3d, - 0xdb, 0xff, 0xf8, 0xd4, 0xcb, 0x64, 0xf1, 0x68, 0x1a, 0x6f, 0x4c, 0x73, 0x65, 0xa7, 0x25, 0x66, - 0xda, 0xfa, 0xe8, 0xc9, 0x73, 0x53, 0x7b, 0xfa, 0xdc, 0xd4, 0x5e, 0x3c, 0x37, 0xd1, 0x77, 0x63, - 0x13, 0xfd, 0x34, 0x36, 0xd1, 0xe3, 0xb1, 0x89, 0x9e, 0x8c, 0x4d, 0xf4, 0xc7, 0xd8, 0x44, 0x7f, - 0x8e, 0x4d, 0xed, 0xc5, 0xd8, 0x44, 0x8f, 0x8e, 0x4d, 0xed, 0xc9, 0xb1, 0xa9, 0x3d, 0x3d, 0x36, - 0xb5, 0x2f, 0xca, 0x03, 0xd7, 0xa1, 0x3e, 0xdf, 0x2f, 0x8b, 0xff, 0x58, 0x57, 0xfe, 0x0a, 0x00, - 0x00, 0xff, 0xff, 0x82, 0xee, 0x79, 0x78, 0xaa, 0x0d, 0x00, 0x00, + // 1444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, + 0x16, 0x26, 0xad, 0x87, 0xad, 0x23, 0x5a, 0xa1, 0xc7, 0x8e, 0xe3, 0x28, 0xf7, 0x52, 0xc9, 0x00, + 0xc9, 0x35, 0xee, 0xbd, 0x51, 0x52, 0x07, 0x69, 0xbd, 0x68, 0x11, 0xc8, 0x89, 0xec, 0xa8, 0xb5, + 0x64, 0x67, 0x24, 0x35, 0x6d, 0x81, 0x42, 0xa0, 0xa5, 0xb1, 0x4d, 0x94, 0xa4, 0x14, 0x3e, 0x8a, + 0x7a, 0x51, 0xa0, 0x40, 0x97, 0x5d, 0x34, 0xcb, 0xfe, 0x84, 0xae, 0xbb, 0xe9, 0xbe, 0xab, 0x2c, + 0xb3, 0x0c, 0xba, 0x08, 0x1a, 0x79, 0xd3, 0x65, 0xd0, 0x5f, 0x50, 0xcc, 0x83, 0x14, 0xa9, 0xc8, + 0x68, 0xfa, 0xc8, 0x8e, 0x73, 0xce, 0x37, 0xdf, 0x9c, 0x39, 0xaf, 0x39, 0x04, 0xad, 0x3f, 0xf4, + 0x02, 0xfa, 0x45, 0x75, 0xe4, 0x0d, 0x83, 0x21, 0xca, 0x8b, 0x55, 0xf9, 0xfa, 0x91, 0x15, 0x1c, + 0x87, 0x07, 0xd5, 0xfe, 0xd0, 0xb9, 0x71, 0x34, 0x3c, 0x1a, 0xde, 0xe0, 0xea, 0x83, 0xf0, 0x90, + 0xaf, 0xf8, 0x82, 0x7f, 0x89, 0x6d, 0xf8, 0x37, 0x15, 0xb4, 0x87, 0x9e, 0x15, 0x50, 0x42, 0x1f, + 0x85, 0xd4, 0x0f, 0x50, 0x0b, 0x20, 0xb0, 0x1c, 0xea, 0x53, 0xcf, 0xa2, 0xfe, 0x9a, 0x7a, 0x39, + 0xb3, 0x5e, 0xdc, 0x40, 0x55, 0x79, 0x54, 0xc7, 0x72, 0x68, 0x9b, 0x6b, 0xb6, 0xca, 0x4f, 0x9e, + 0x57, 0x94, 0x9f, 0x9f, 0x57, 0xd0, 0xbe, 0x47, 0x4d, 0xdb, 0x1e, 0xf6, 0x3b, 0xf1, 0x2e, 0x92, + 0x60, 0x40, 0xef, 0x40, 0xbe, 0x3d, 0x0c, 0xbd, 0x3e, 0x5d, 0x9b, 0xbb, 0xac, 0xae, 0x97, 0x36, + 0x2a, 0x11, 0x57, 0xf2, 0xd4, 0xaa, 0x80, 0xd4, 0xdd, 0xd0, 0x21, 0x79, 0x9f, 0x7f, 0xa3, 0x4d, + 0x58, 0x70, 0x68, 0x60, 0x0e, 0xcc, 0xc0, 0x5c, 0xcb, 0x70, 0x33, 0x56, 0xa3, 0xad, 0x4d, 0x1a, + 0x78, 0x56, 0xbf, 0x29, 0xb5, 0x5b, 0xd9, 0x27, 
0xcf, 0x2b, 0x2a, 0x89, 0xd1, 0xb8, 0x02, 0x30, + 0xe1, 0x43, 0xf3, 0x90, 0xa9, 0xed, 0x37, 0x74, 0x05, 0x2d, 0x40, 0x96, 0x74, 0x77, 0xeb, 0xba, + 0x8a, 0xcf, 0xc1, 0xa2, 0x3c, 0xdd, 0x1f, 0x0d, 0x5d, 0x9f, 0xe2, 0xf7, 0xa0, 0x48, 0xa8, 0x39, + 0x88, 0x7c, 0x50, 0x85, 0xf9, 0x47, 0x61, 0xd2, 0x01, 0x2b, 0xd1, 0xc9, 0x0f, 0x42, 0xea, 0x9d, + 0x48, 0x18, 0x89, 0x40, 0xf8, 0x0e, 0x68, 0x62, 0xbb, 0xa0, 0x43, 0x37, 0x60, 0xde, 0xa3, 0x7e, + 0x68, 0x07, 0xd1, 0xfe, 0xf3, 0x53, 0xfb, 0x05, 0x8e, 0x44, 0x28, 0xfc, 0x9d, 0x0a, 0x5a, 0x92, + 0x1a, 0xfd, 0x1f, 0x90, 0x1f, 0x98, 0x5e, 0xd0, 0xe3, 0x9e, 0x0c, 0x4c, 0x67, 0xd4, 0x73, 0x18, + 0x99, 0xba, 0x9e, 0x21, 0x3a, 0xd7, 0x74, 0x22, 0x45, 0xd3, 0x47, 0xeb, 0xa0, 0x53, 0x77, 0x90, + 0xc6, 0xce, 0x71, 0x6c, 0x89, 0xba, 0x83, 0x24, 0xf2, 0x26, 0x2c, 0x38, 0x66, 0xd0, 0x3f, 0xa6, + 0x9e, 0x2f, 0x9d, 0x1a, 0x5f, 0x6d, 0xd7, 0x3c, 0xa0, 0x76, 0x53, 0x28, 0x49, 0x8c, 0xc2, 0x0d, + 0x58, 0x4c, 0x19, 0x8d, 0x36, 0x5f, 0x33, 0x41, 0x58, 0x54, 0x94, 0x64, 0x2a, 0xe0, 0xc7, 0x2a, + 0x2c, 0x73, 0xae, 0x76, 0xe0, 0x51, 0xd3, 0x89, 0x19, 0xef, 0x40, 0xb1, 0x7f, 0x1c, 0xba, 0x9f, + 0xa5, 0x28, 0x2f, 0xbc, 0x4a, 0x79, 0x97, 0x81, 0x24, 0x6f, 0x72, 0xc7, 0x94, 0x49, 0x73, 0x7f, + 0xc2, 0xa4, 0x5b, 0x80, 0xf8, 0xbd, 0x3f, 0x34, 0xed, 0x90, 0xfa, 0x91, 0xf7, 0xff, 0x0d, 0x60, + 0x33, 0x69, 0xcf, 0x35, 0x1d, 0xca, 0xbd, 0x5e, 0x20, 0x05, 0x2e, 0x69, 0x99, 0x0e, 0xc5, 0x9b, + 0xb0, 0x9c, 0xda, 0x24, 0xaf, 0x71, 0x05, 0x34, 0xb1, 0xeb, 0x73, 0x2e, 0xe7, 0xf7, 0x28, 0x90, + 0xa2, 0x3d, 0x81, 0xe2, 0x65, 0x58, 0xda, 0x8d, 0x68, 0xa2, 0xd3, 0xf0, 0x6d, 0x69, 0x83, 0x14, + 0x4a, 0xb6, 0x0a, 0x14, 0x27, 0x36, 0x44, 0x64, 0x10, 0x1b, 0xe1, 0x63, 0x04, 0x7a, 0xd7, 0xa7, + 0x5e, 0x3b, 0x30, 0x83, 0x98, 0xea, 0x47, 0x15, 0x96, 0x12, 0x42, 0x49, 0x75, 0x15, 0x4a, 0x96, + 0x7b, 0x44, 0xfd, 0xc0, 0x1a, 0xba, 0x3d, 0xcf, 0x0c, 0xc4, 0x95, 0x54, 0xb2, 0x18, 0x4b, 0x89, + 0x19, 0x50, 0x76, 0x6b, 0x37, 0x74, 0x7a, 0xb1, 0x17, 0xd5, 0xf5, 0x2c, 0x29, 0xb8, 0xa1, 0x23, + 0x9c, 0xc7, 0x52, 0xd2, 0x1c, 0x59, 0xbd, 0x29, 0xa6, 0x0c, 0x67, 0xd2, 0xcd, 0x91, 0xd5, 0x48, + 0x91, 0x55, 0x61, 0xd9, 0x0b, 0x6d, 0x3a, 0x0d, 0xcf, 0x72, 0xf8, 0x12, 0x53, 0xa5, 0xf0, 0xf8, + 0x53, 0x58, 0x66, 0x86, 0x37, 0xee, 0xa5, 0x4d, 0xbf, 0x00, 0xf3, 0xa1, 0x4f, 0xbd, 0x9e, 0x35, + 0x90, 0x61, 0xc8, 0xb3, 0x65, 0x63, 0x80, 0xae, 0x43, 0x96, 0x77, 0x06, 0x66, 0x66, 0x71, 0xe3, + 0x62, 0x14, 0xec, 0x57, 0x2e, 0x4f, 0x38, 0x0c, 0xef, 0x00, 0x62, 0x2a, 0x3f, 0xcd, 0xfe, 0x16, + 0xe4, 0x7c, 0x26, 0x90, 0x29, 0x77, 0x29, 0xc9, 0x32, 0x65, 0x09, 0x11, 0x48, 0xfc, 0x83, 0x0a, + 0x86, 0x68, 0x3f, 0xfe, 0xf6, 0xd0, 0x4b, 0xd6, 0x8c, 0xff, 0xa6, 0x6b, 0x77, 0x13, 0xb4, 0xa8, + 0x2a, 0x7b, 0x3e, 0x0d, 0x64, 0xfd, 0x9e, 0x9f, 0x55, 0xbf, 0x3e, 0x29, 0x46, 0xd0, 0x36, 0x0d, + 0x70, 0x03, 0x2a, 0x67, 0xda, 0x2c, 0x5d, 0x71, 0x0d, 0xf2, 0x0e, 0x87, 0x48, 0x5f, 0x94, 0xd2, + 0xbd, 0x96, 0x48, 0x2d, 0xfe, 0x49, 0x85, 0x73, 0x53, 0x15, 0xc9, 0xae, 0x70, 0xe8, 0x0d, 0x1d, + 0x19, 0xeb, 0x64, 0xb4, 0x4a, 0x4c, 0xde, 0x90, 0xe2, 0xc6, 0x20, 0x19, 0xce, 0xb9, 0x54, 0x38, + 0xef, 0x40, 0x9e, 0xa7, 0x76, 0xd4, 0x95, 0x96, 0x52, 0xb7, 0xda, 0x37, 0x2d, 0x6f, 0x6b, 0x45, + 0x3e, 0x38, 0x1a, 0x17, 0xd5, 0x06, 0xe6, 0x28, 0xa0, 0x1e, 0x91, 0xdb, 0xd0, 0xff, 0x20, 0x2f, + 0x3a, 0xc2, 0x5a, 0x96, 0x13, 0x2c, 0x46, 0x04, 0xc9, 0xa6, 0x21, 0x21, 0xf8, 0x5b, 0x15, 0x72, + 0xc2, 0xf4, 0x37, 0x15, 0xab, 0x32, 0x2c, 0x50, 0xb7, 0x3f, 0x1c, 0x58, 0xee, 0x11, 0x2f, 0x91, + 0x1c, 0x89, 0xd7, 0x08, 0xc9, 0xd4, 0x65, 0xb5, 0xa0, 0xc9, 0xfc, 0x5c, 
0x83, 0xd5, 0x8e, 0x67, + 0xba, 0xfe, 0x21, 0xf5, 0xb8, 0x61, 0x71, 0x60, 0xf0, 0x97, 0x00, 0x13, 0x7f, 0x27, 0xfc, 0xa4, + 0xfe, 0x35, 0x3f, 0x55, 0x61, 0xde, 0x37, 0x9d, 0x91, 0x1d, 0xf7, 0xc9, 0x38, 0xd0, 0x6d, 0x2e, + 0x96, 0x9e, 0x8a, 0x40, 0xf8, 0x36, 0x14, 0x62, 0x6a, 0x66, 0x79, 0xdc, 0x11, 0x35, 0xc2, 0xbf, + 0xd1, 0x0a, 0xe4, 0x78, 0xbf, 0xe3, 0x8e, 0xd0, 0x88, 0x58, 0xe0, 0x1a, 0xe4, 0x05, 0xdf, 0x44, + 0x2f, 0x7a, 0x8e, 0x58, 0xb0, 0x5e, 0x39, 0xc3, 0x8b, 0xc5, 0x60, 0xe2, 0x42, 0x5c, 0x83, 0xc5, + 0x54, 0xaa, 0xa6, 0xde, 0x2e, 0xf5, 0xb5, 0xde, 0xae, 0x6f, 0xe6, 0xa0, 0x94, 0x9e, 0x15, 0xd0, + 0x6d, 0xc8, 0x06, 0x27, 0x23, 0x61, 0x4d, 0x69, 0xe3, 0xca, 0xec, 0x89, 0x42, 0x2e, 0x3b, 0x27, + 0x23, 0x4a, 0x38, 0x9c, 0x75, 0x63, 0x51, 0x00, 0xe2, 0x49, 0x10, 0xc9, 0x0b, 0x42, 0xc4, 0xda, + 0x31, 0x73, 0xcd, 0x31, 0xb5, 0x47, 0x3c, 0xa8, 0x05, 0xc2, 0xbf, 0x99, 0x2c, 0x74, 0xad, 0x60, + 0x2d, 0x27, 0x64, 0xec, 0x1b, 0x9f, 0x00, 0x4c, 0xc8, 0x51, 0x11, 0xe6, 0xbb, 0xad, 0x0f, 0x5a, + 0x7b, 0x0f, 0x5b, 0xba, 0xc2, 0x16, 0x77, 0xf7, 0xba, 0xad, 0x4e, 0x9d, 0xe8, 0x2a, 0x2a, 0x40, + 0x6e, 0xa7, 0xd6, 0xdd, 0xa9, 0xeb, 0x73, 0x68, 0x11, 0x0a, 0xf7, 0x1b, 0xed, 0xce, 0xde, 0x0e, + 0xa9, 0x35, 0xf5, 0x0c, 0x42, 0x50, 0xe2, 0x9a, 0x89, 0x2c, 0xcb, 0xb6, 0xb6, 0xbb, 0xcd, 0x66, + 0x8d, 0x7c, 0xac, 0xe7, 0xd8, 0x9c, 0xd3, 0x68, 0x6d, 0xef, 0xe9, 0x79, 0xa4, 0xc1, 0x42, 0xbb, + 0x53, 0xeb, 0xd4, 0xdb, 0xf5, 0x8e, 0x3e, 0x8f, 0x1b, 0x90, 0x17, 0x47, 0xff, 0xed, 0x2c, 0xc2, + 0x3d, 0xd0, 0x92, 0x2e, 0x47, 0x57, 0x53, 0x5e, 0x8d, 0xe9, 0xb8, 0x3a, 0xe1, 0xc5, 0x28, 0x7f, + 0x84, 0xfb, 0xa6, 0xf2, 0x27, 0xc3, 0x85, 0x32, 0x7f, 0xbe, 0x56, 0xa1, 0x34, 0x49, 0xfb, 0x6d, + 0xcb, 0xa6, 0xff, 0x44, 0x97, 0x29, 0xc3, 0xc2, 0xa1, 0x65, 0x53, 0x6e, 0x83, 0x38, 0x2e, 0x5e, + 0xcf, 0xac, 0xca, 0x55, 0x58, 0x89, 0xaa, 0xb2, 0xd3, 0xbe, 0xb7, 0x15, 0xd5, 0xe4, 0x7f, 0xdf, + 0x87, 0x42, 0x7c, 0x35, 0x16, 0xa9, 0xfa, 0x83, 0x6e, 0x6d, 0x57, 0x57, 0x58, 0xa4, 0x5a, 0x7b, + 0x9d, 0x9e, 0x58, 0xaa, 0xe8, 0x1c, 0x14, 0x49, 0x7d, 0xa7, 0xfe, 0x51, 0xaf, 0x59, 0xeb, 0xdc, + 0xbd, 0xaf, 0xcf, 0xb1, 0xd0, 0x09, 0x41, 0x6b, 0x4f, 0xca, 0x32, 0x1b, 0xa7, 0x39, 0x58, 0x88, + 0x6c, 0x67, 0xd9, 0xb9, 0x1f, 0xfa, 0xc7, 0x68, 0x65, 0xd6, 0x90, 0x5c, 0x3e, 0x3f, 0x25, 0x95, + 0x1d, 0x42, 0x41, 0x6f, 0x43, 0x8e, 0xcf, 0x55, 0x68, 0xe6, 0x9c, 0x5a, 0x9e, 0x3d, 0x7d, 0x62, + 0x05, 0xdd, 0x83, 0x62, 0x62, 0x1e, 0x3b, 0x63, 0xf7, 0xa5, 0x94, 0x34, 0x3d, 0xba, 0x61, 0xe5, + 0xa6, 0x8a, 0xee, 0x43, 0x31, 0x31, 0x0e, 0xa1, 0x72, 0x2a, 0x99, 0x52, 0x83, 0xd5, 0x84, 0x6b, + 0xc6, 0xfc, 0x84, 0x15, 0x54, 0x07, 0x98, 0x4c, 0x42, 0xe8, 0x62, 0x0a, 0x9c, 0x1c, 0x99, 0xca, + 0xe5, 0x59, 0xaa, 0x98, 0x66, 0x0b, 0x0a, 0xf1, 0x1c, 0x80, 0xd6, 0x66, 0x8c, 0x06, 0x82, 0xe4, + 0xec, 0xa1, 0x01, 0x2b, 0x68, 0x1b, 0xb4, 0x9a, 0x6d, 0xbf, 0x0e, 0x4d, 0x39, 0xa9, 0xf1, 0xa7, + 0x79, 0x6c, 0xb8, 0x70, 0xc6, 0xd3, 0x8b, 0xae, 0xa5, 0x9b, 0xcf, 0x59, 0xf3, 0x44, 0xf9, 0x3f, + 0x7f, 0x88, 0x8b, 0x4f, 0x6b, 0x42, 0x29, 0xfd, 0x8c, 0xa0, 0xb3, 0xc6, 0xe8, 0xb2, 0x11, 0x2b, + 0x66, 0xbf, 0x3b, 0xca, 0x3a, 0x8b, 0xac, 0x96, 0xcc, 0x7f, 0xb4, 0xfa, 0x2a, 0x19, 0x2b, 0xcd, + 0xf2, 0xbf, 0xa6, 0xb9, 0x92, 0xd5, 0xc2, 0x98, 0xb6, 0xde, 0x7d, 0xfa, 0xc2, 0x50, 0x9e, 0xbd, + 0x30, 0x94, 0x97, 0x2f, 0x0c, 0xf5, 0xab, 0xb1, 0xa1, 0x7e, 0x3f, 0x36, 0xd4, 0x27, 0x63, 0x43, + 0x7d, 0x3a, 0x36, 0xd4, 0x5f, 0xc6, 0x86, 0xfa, 0xeb, 0xd8, 0x50, 0x5e, 0x8e, 0x0d, 0xf5, 0xf1, + 0xa9, 0xa1, 0x3c, 0x3d, 0x35, 0x94, 0x67, 0xa7, 0x86, 0xf2, 0x49, 0xbe, 0x6f, 0x5b, 0xd4, 0x0d, + 
0x0e, 0xf2, 0xfc, 0x5f, 0xf5, 0xd6, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0xa2, 0xf3, 0x2b, + 0xf2, 0x0e, 0x00, 0x00, } func (x MatchType) String() string { @@ -1543,6 +1670,13 @@ func (x WriteRequest_SourceEnum) String() string { } return strconv.Itoa(int(x)) } +func (x MetricMetadata_MetricType) String() string { + s, ok := MetricMetadata_MetricType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} func (this *WriteRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1573,6 +1707,14 @@ func (this *WriteRequest) Equal(that interface{}) bool { if this.Source != that1.Source { return false } + if len(this.Metadata) != len(that1.Metadata) { + return false + } + for i := range this.Metadata { + if !this.Metadata[i].Equal(that1.Metadata[i]) { + return false + } + } return true } func (this *WriteResponse) Equal(that interface{}) bool { @@ -2249,6 +2391,39 @@ func (this *LabelMatchers) Equal(that interface{}) bool { } return true } +func (this *MetricMetadata) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetricMetadata) + if !ok { + that2, ok := that.(MetricMetadata) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.MetricName != that1.MetricName { + return false + } + if this.Help != that1.Help { + return false + } + if this.Unit != that1.Unit { + return false + } + return true +} func (this *Metric) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2366,10 +2541,13 @@ func (this *WriteRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&client.WriteRequest{") s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + if this.Metadata != nil { + s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -2663,6 +2841,19 @@ func (this *LabelMatchers) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *MetricMetadata) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.MetricMetadata{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "MetricName: "+fmt.Sprintf("%#v", this.MetricName)+",\n") + s = append(s, "Help: "+fmt.Sprintf("%#v", this.Help)+",\n") + s = append(s, "Unit: "+fmt.Sprintf("%#v", this.Unit)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *Metric) GoString() string { if this == nil { return "nil" @@ -3240,6 +3431,20 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if m.Source != 0 { i = encodeVarintCortex(dAtA, i, uint64(m.Source)) i-- @@ -4135,6 +4340,55 @@ func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x22 + } + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintCortex(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Metric) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4314,6 +4568,12 @@ func (m *WriteRequest) Size() (n int) { if m.Source != 0 { n += 1 + sovCortex(uint64(m.Source)) } + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } return n } @@ -4691,6 +4951,30 @@ func (m *LabelMatchers) Size() (n int) { return n } +func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCortex(uint64(m.Type)) + } + l = len(m.MetricName) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + func (m *Metric) Size() (n int) { if m == nil { return 0 @@ -4770,9 +5054,15 @@ func (this *WriteRequest) String() string { if this == nil { return "nil" } + repeatedStringForMetadata := "[]*MetricMetadata{" + for _, f := range this.Metadata { + repeatedStringForMetadata += strings.Replace(f.String(), "MetricMetadata", "MetricMetadata", 1) + "," + } + repeatedStringForMetadata += "}" s := strings.Join([]string{`&WriteRequest{`, `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Metadata:` + repeatedStringForMetadata + `,`, `}`, }, "") return s @@ -5081,6 +5371,19 @@ func (this *LabelMatchers) String() string { }, "") return s } +func (this *MetricMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricMetadata{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `Help:` + fmt.Sprintf("%v", this.Help) + `,`, + `Unit:` + fmt.Sprintf("%v", this.Unit) + `,`, + `}`, + }, "") + return s +} func (this *Metric) String() string { if this == nil { return "nil" @@ -5215,6 +5518,40 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata, 
&MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) @@ -7479,6 +7816,174 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error { } return nil } +func (m *MetricMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricMetadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Metric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto index 8ca40645277f..6a341b599831 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto @@ -34,6 +34,7 @@ message WriteRequest { RULE = 1; } SourceEnum Source = 2; + repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true]; } message WriteResponse {} @@ -142,6 +143,24 @@ message LabelMatchers { repeated LabelMatcher matchers = 1; } +message MetricMetadata { + enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; + } + + MetricType type = 1; + string metric_name = 2; + string help = 4; + string unit = 5; +} + message Metric { repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"]; } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index 99302f3c843d..0241f16e48b4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -68,3 +68,10 @@ func HashAdd32(h uint32, s string) uint32 { } return h } + +// HashAddByte32 adds a byte to a fnv32 hash value, returning the updated hash. +func HashAddByte32(h uint32, b byte) uint32 { + h *= prime32 + h ^= uint32(b) + return h +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go index 165c17ab6151..73f619163fbd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go @@ -43,7 +43,7 @@ type PreallocConfig struct{} // RegisterFlags registers configuration settings. func (PreallocConfig) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, use for preallocations.") + f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, used for preallocations.") f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.") f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.") } @@ -266,11 +266,12 @@ func (bs *LabelAdapter) Compare(other LabelAdapter) int { } // ReuseSlice puts the slice back into a sync.Pool for reuse. 
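// As an illustration only (a sketch, not part of this change), the typical
// caller is a push handler that releases the request slice once it no longer
// references it:
//
//	func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
//		defer client.ReuseSlice(req.Timeseries)
//		// ... validate and forward the samples ...
//	}
//
// Once ReuseSlice has been called, the slice must not be touched again.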
-func ReuseSlice(slice []PreallocTimeseries) { - for i := range slice { - ReuseTimeseries(slice[i].TimeSeries) +func ReuseSlice(ts []PreallocTimeseries) { + for i := range ts { + ReuseTimeseries(ts[i].TimeSeries) } - slicePool.Put(slice[:0]) //nolint:staticcheck //see comment on slicePool for more details + + slicePool.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details } // ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go index f388954ab2c9..1306646c5684 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go @@ -359,13 +359,17 @@ FindQueue: return nil, err } - i, n := 0, rand.Intn(len(f.queues)) - for userID, queue := range f.queues { - if i < n { - i++ + keys := make([]string, 0, len(f.queues)) + for k := range f.queues { + keys = append(keys, k) + } + rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) + + for _, userID := range keys { + queue, ok := f.queues[userID] + if !ok { continue } - /* We want to dequeue the next unexpired request from the chosen tenant queue. The chance of choosing a particular tenant for dequeueing is (1/active_tenants). diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go index 6315d9acbe6b..d399697a52ae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go @@ -44,7 +44,7 @@ func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process.") f.DurationVar(&cfg.DNSLookupDuration, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS.") - cfg.GRPCClientConfig.RegisterFlags("querier.frontend-client", f) + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) } // Worker is the counter-part to the frontend, actually processing requests. diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index 72adb981802e..29e75816fe96 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -168,25 +168,19 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { } // shouldCacheResponse says whether the response should be cached or not. 
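// For example, assuming noCacheValue holds the standard "no-store" directive,
// a query-range response carrying the header
//
//	Cache-Control: no-store
//
// is still returned to the caller, but is never written to the results cache.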
-func shouldCacheResponse(r Response) bool { +func (s resultsCache) shouldCacheResponse(r Response) bool { if promResp, ok := r.(*PrometheusResponse); ok { - shouldCache := true - outer: for _, hv := range promResp.Headers { - if hv == nil { + if hv.GetName() != cachecontrolHeader { continue } - if hv.Name != cachecontrolHeader { - continue - } - for _, v := range hv.Values { + for _, v := range hv.GetValues() { if v == noCacheValue { - shouldCache = false - break outer + level.Debug(s.logger).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cachecontrolHeader, noCacheValue)) + return false } } } - return shouldCache } return true } @@ -197,8 +191,7 @@ func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Ex return nil, nil, err } - if !shouldCacheResponse(response) { - level.Debug(s.logger).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cachecontrolHeader, noCacheValue)) + if !s.shouldCacheResponse(response) { return response, []Extent{}, nil } @@ -238,6 +231,9 @@ func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent for _, reqResp := range reqResps { responses = append(responses, reqResp.Response) + if !s.shouldCacheResponse(reqResp.Response) { + continue + } extent, err := toExtent(ctx, reqResp.Request, reqResp.Response) if err != nil { return nil, nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go new file mode 100644 index 000000000000..32adb13e858b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go @@ -0,0 +1,456 @@ +package ring + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +type BasicLifecyclerDelegate interface { + // OnRingInstanceRegister is called while the lifecycler is registering the + // instance within the ring and should return the state and set of tokens to + // use for the instance itself. + OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) + + // OnRingInstanceTokens is called once the instance tokens are set and are + // stable within the ring (honoring the observe period, if set). + OnRingInstanceTokens(lifecycler *BasicLifecycler, tokens Tokens) + + // OnRingInstanceStopping is called while the lifecycler is stopping. The lifecycler + // will continue to heartbeat the ring while this function is executing and will proceed + // to unregister the instance from the ring only after this function has returned. + OnRingInstanceStopping(lifecycler *BasicLifecycler) +} + +type BasicLifecyclerConfig struct { + // ID is the instance unique ID. + ID string + + // Addr is the instance address, in the form "address:port". + Addr string + + // Zone is the instance availability zone. Can be an empty string + // if zone awareness is unused. + Zone string + + HeartbeatPeriod time.Duration + TokensObservePeriod time.Duration + NumTokens int +} + +// BasicLifecycler is a basic ring lifecycler which allows hooking custom +// logic into different stages of the lifecycle.
This lifecycler should be +// used to build higher level lifecyclers. +// +// This lifecycler never changes the instance state. It's the delegate's +// responsibility to call ChangeState(). +type BasicLifecycler struct { + *services.BasicService + + cfg BasicLifecyclerConfig + logger log.Logger + store kv.Client + delegate BasicLifecyclerDelegate + metrics *BasicLifecyclerMetrics + + // Channel used to execute logic within the lifecycler loop. + actorChan chan func() + + // These values are initialised at startup, and never change + ringName string + ringKey string + + // The current instance state. + currState sync.RWMutex + currInstanceDesc *IngesterDesc +} + +// NewBasicLifecycler makes a new BasicLifecycler. +func NewBasicLifecycler(cfg BasicLifecyclerConfig, ringName, ringKey string, store kv.Client, delegate BasicLifecyclerDelegate, logger log.Logger, reg prometheus.Registerer) (*BasicLifecycler, error) { + l := &BasicLifecycler{ + cfg: cfg, + ringName: ringName, + ringKey: ringKey, + logger: logger, + store: store, + delegate: delegate, + metrics: NewBasicLifecyclerMetrics(ringName, reg), + actorChan: make(chan func()), + } + + l.metrics.tokensToOwn.Set(float64(cfg.NumTokens)) + l.BasicService = services.NewBasicService(l.starting, l.running, l.stopping) + + return l, nil +} + +func (l *BasicLifecycler) GetInstanceID() string { + return l.cfg.ID +} + +func (l *BasicLifecycler) GetInstanceAddr() string { + return l.cfg.Addr +} + +func (l *BasicLifecycler) GetInstanceZone() string { + return l.cfg.Zone +} + +func (l *BasicLifecycler) GetState() IngesterState { + l.currState.RLock() + defer l.currState.RUnlock() + + if l.currInstanceDesc == nil { + return PENDING + } + + return l.currInstanceDesc.GetState() +} + +func (l *BasicLifecycler) GetTokens() Tokens { + l.currState.RLock() + defer l.currState.RUnlock() + + if l.currInstanceDesc == nil { + return Tokens{} + } + + return l.currInstanceDesc.GetTokens() +} + +// IsRegistered returns whether the instance is currently registered within the ring. +func (l *BasicLifecycler) IsRegistered() bool { + l.currState.RLock() + defer l.currState.RUnlock() + + return l.currInstanceDesc != nil +} + +func (l *BasicLifecycler) ChangeState(ctx context.Context, state IngesterState) error { + return l.run(func() error { + return l.changeState(ctx, state) + }) +} + +func (l *BasicLifecycler) starting(ctx context.Context) error { + if err := l.registerInstance(ctx); err != nil { + return errors.Wrap(err, "register instance in the ring") + } + + // If we have registered an instance with some tokens and + // an observe period has been configured, we should now wait + // until tokens are "stable" within the ring. + if len(l.GetTokens()) > 0 && l.cfg.TokensObservePeriod > 0 { + if err := l.waitStableTokens(ctx, l.cfg.TokensObservePeriod); err != nil { + return errors.Wrap(err, "wait stable tokens in the ring") + } + } + + // At this point, if some tokens have been set they're stable and we + // can notify the delegate.
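+	// As an illustration only (hypothetical, not part of this change; the 128
+	// token count is arbitrary), a minimal delegate could keep the tokens of an
+	// already registered instance and otherwise generate fresh ones:
+	//
+	//	type simpleDelegate struct{}
+	//
+	//	func (simpleDelegate) OnRingInstanceRegister(l *BasicLifecycler, ring Desc, exists bool, id string, desc IngesterDesc) (IngesterState, Tokens) {
+	//		if exists {
+	//			return desc.GetState(), desc.GetTokens()
+	//		}
+	//		_, taken := ring.TokensFor(id)
+	//		return ACTIVE, GenerateTokens(128, taken)
+	//	}
+	//
+	//	func (simpleDelegate) OnRingInstanceTokens(*BasicLifecycler, Tokens) {}
+	//	func (simpleDelegate) OnRingInstanceStopping(*BasicLifecycler)       {}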
+ if tokens := l.GetTokens(); len(tokens) > 0 { + l.metrics.tokensOwned.Set(float64(len(tokens))) + l.delegate.OnRingInstanceTokens(l, tokens) + } + + return nil +} + +func (l *BasicLifecycler) running(ctx context.Context) error { + heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTicker.Stop() + + for { + select { + case <-heartbeatTicker.C: + l.heartbeat(ctx) + + case f := <-l.actorChan: + f() + + case <-ctx.Done(): + level.Info(util.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName) + return nil + } + } +} + +func (l *BasicLifecycler) stopping(runningError error) error { + if runningError != nil { + return nil + } + + // Let the delegate change the instance state (i.e. to LEAVING) and handle any + // state transfer / flushing while we continue to heartbeat. + done := make(chan struct{}) + go func() { + l.delegate.OnRingInstanceStopping(l) + close(done) + }() + + // Heartbeat while the stopping delegate function is running. + heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTicker.Stop() + +heartbeatLoop: + for { + select { + case <-heartbeatTicker.C: + l.heartbeat(context.Background()) + case <-done: + break heartbeatLoop + } + } + + // Remove the instance from the ring. + if err := l.unregisterInstance(context.Background()); err != nil { + return errors.Wrapf(err, "failed to unregister instance from the ring (ring: %s)", l.ringName) + } + level.Info(l.logger).Log("msg", "instance removed from the ring", "ring", l.ringName) + + return nil +} + +// registerInstance registers the instance in the ring. The initial state and set of tokens +// depend on the OnRingInstanceRegister() delegate function. +func (l *BasicLifecycler) registerInstance(ctx context.Context) error { + var instanceDesc IngesterDesc + + err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + ringDesc := GetOrCreateRingDesc(in) + + var exists bool + instanceDesc, exists = ringDesc.Ingesters[l.cfg.ID] + if exists { + level.Info(l.logger).Log("msg", "instance found in the ring", "instance", l.cfg.ID, "ring", l.ringName, "state", instanceDesc.GetState(), "tokens", len(instanceDesc.GetTokens())) + } else { + level.Info(l.logger).Log("msg", "instance not found in the ring", "instance", l.cfg.ID, "ring", l.ringName) + } + + // We call the delegate to get the desired state right after the initialization. + state, tokens := l.delegate.OnRingInstanceRegister(l, *ringDesc, exists, l.cfg.ID, instanceDesc) + + // Ensure tokens are sorted. + sort.Sort(tokens) + + if !exists { + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state) + return ringDesc, true, nil + } + + if instanceDesc.State != state || !tokens.Equals(instanceDesc.Tokens) { + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state) + return ringDesc, true, nil + } + + // We haven't modified the ring, so don't try to store it. + return nil, true, nil + }) + + if err != nil { + return err + } + + l.currState.Lock() + l.currInstanceDesc = &instanceDesc + l.currState.Unlock() + + return nil +} + +func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Duration) error { + heartbeatTicker := time.NewTicker(l.cfg.HeartbeatPeriod) + defer heartbeatTicker.Stop() + + // The first observation will occur after the specified period.
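+	// For example (an illustrative timeline), with a 30s observe period the first
+	// ownership check runs at t+30s; every failed verification re-arms the timer,
+	// so a token conflict pushes the next check to t+60s, then t+90s, and so on
+	// until the tokens are verified as stable.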
+ level.Info(l.logger).Log("msg", "waiting stable tokens", "ring", l.ringName) + observeChan := time.After(period) + + for { + select { + case <-observeChan: + if !l.verifyTokens(ctx) { + // The verification has failed. + level.Info(l.logger).Log("msg", "tokens verification failed, keep observing", "ring", l.ringName) + observeChan = time.After(period) + break + } + + level.Info(l.logger).Log("msg", "tokens verification succeeded", "ring", l.ringName) + return nil + + case <-heartbeatTicker.C: + l.heartbeat(ctx) + + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// verifyTokens checks that the tokens this instance has registered in the ring still belong to it. +// A gossiping ring may change the ownership of tokens in case of conflicts. +// If the instance doesn't own its tokens anymore, this method generates new tokens and stores them in the ring. +func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { + result := false + + err := l.updateInstance(ctx, func(r Desc, i *IngesterDesc) bool { + // At this point, we should have the same tokens as we have registered before. + actualTokens, takenTokens := r.TokensFor(l.cfg.ID) + + if actualTokens.Equals(l.GetTokens()) { + // Tokens have been verified. No need to change them. + result = true + return false + } + + // Uh oh, our tokens are not ours anymore. Let's try new ones. + needTokens := l.cfg.NumTokens - len(actualTokens) + + level.Info(l.logger).Log("msg", "generating new tokens", "count", needTokens, "ring", l.ringName) + newTokens := GenerateTokens(needTokens, takenTokens) + + actualTokens = append(actualTokens, newTokens...) + sort.Sort(actualTokens) + + i.Tokens = actualTokens + return true + }) + + if err != nil { + level.Error(l.logger).Log("msg", "failed to verify tokens", "ring", l.ringName, "err", err) + return false + } + + return result +} + +// unregisterInstance removes our entry from the store. +func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { + level.Info(l.logger).Log("msg", "unregistering instance from ring", "ring", l.ringName) + + err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + if in == nil { + return nil, false, fmt.Errorf("found empty ring when trying to unregister") + } + + ringDesc := in.(*Desc) + ringDesc.RemoveIngester(l.cfg.ID) + return ringDesc, true, nil + }) + + if err != nil { + return err + } + + l.currState.Lock() + l.currInstanceDesc = nil + l.currState.Unlock() + + l.metrics.tokensToOwn.Set(0) + l.metrics.tokensOwned.Set(0) + return nil +} + +func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(Desc, *IngesterDesc) bool) error { + var instanceDesc IngesterDesc + + err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + ringDesc := GetOrCreateRingDesc(in) + + var ok bool + instanceDesc, ok = ringDesc.Ingesters[l.cfg.ID] + + // This could happen if the backend store restarted (and content deleted) + // or the instance has been forgotten. In this case, we re-insert it.
+ if !ok { + level.Warn(l.logger).Log("msg", "instance missing in the ring, adding it back", "ring", l.ringName) + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, l.GetTokens(), l.GetState()) + } + + changed := update(*ringDesc, &instanceDesc) + if ok && !changed { + return nil, false, nil + } + + ringDesc.Ingesters[l.cfg.ID] = instanceDesc + return ringDesc, true, nil + }) + + if err != nil { + return err + } + + l.currState.Lock() + l.currInstanceDesc = &instanceDesc + l.currState.Unlock() + + return nil +} + +// heartbeat updates the instance timestamp within the ring. This function is guaranteed +// to be called within the lifecycler main goroutine. +func (l *BasicLifecycler) heartbeat(ctx context.Context) { + err := l.updateInstance(ctx, func(_ Desc, i *IngesterDesc) bool { + i.Timestamp = time.Now().Unix() + return true + }) + + if err != nil { + level.Warn(l.logger).Log("msg", "failed to heartbeat instance in the ring", "ring", l.ringName, "err", err) + return + } + + l.metrics.heartbeats.Inc() +} + +// changeState of the instance within the ring. This function is guaranteed +// to be called within the lifecycler main goroutine. +func (l *BasicLifecycler) changeState(ctx context.Context, state IngesterState) error { + err := l.updateInstance(ctx, func(_ Desc, i *IngesterDesc) bool { + // No-op if the state hasn't changed. + if i.State == state { + return false + } + + i.State = state + return true + }) + + if err != nil { + level.Warn(l.logger).Log("msg", "failed to change instance state in the ring", "from", l.GetState(), "to", state, "err", err) + } + + return err +} + +// run a function within the lifecycler service goroutine. +func (l *BasicLifecycler) run(fn func() error) error { + sc := l.ServiceContext() + if sc == nil { + return errors.New("lifecycler not running") + } + + errCh := make(chan error) + wrappedFn := func() { + errCh <- fn() + } + + select { + case <-sc.Done(): + return errors.New("lifecycler not running") + case l.actorChan <- wrappedFn: + return <-errCh + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go new file mode 100644 index 000000000000..80dead01947c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go @@ -0,0 +1,98 @@ +package ring + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +type LeaveOnStoppingDelegate struct { + next BasicLifecyclerDelegate + logger log.Logger +} + +func NewLeaveOnStoppingDelegate(next BasicLifecyclerDelegate, logger log.Logger) *LeaveOnStoppingDelegate { + return &LeaveOnStoppingDelegate{ + next: next, + logger: logger, + } +} + +func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) +} + +func (d *LeaveOnStoppingDelegate) OnRingInstanceTokens(lifecycler *BasicLifecycler, tokens Tokens) { + d.next.OnRingInstanceTokens(lifecycler, tokens) +} + +func (d *LeaveOnStoppingDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler) { + if err := lifecycler.changeState(context.Background(), LEAVING); err != nil { + level.Error(d.logger).Log("msg", "failed to change instance state to LEAVING in the ring", "err", err) + } + + 
d.next.OnRingInstanceStopping(lifecycler) +} + +type TokensPersistencyDelegate struct { + next BasicLifecyclerDelegate + logger log.Logger + tokensPath string + loadState IngesterState +} + +func NewTokensPersistencyDelegate(path string, state IngesterState, next BasicLifecyclerDelegate, logger log.Logger) *TokensPersistencyDelegate { + return &TokensPersistencyDelegate{ + next: next, + logger: logger, + tokensPath: path, + loadState: state, + } +} + +func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { + // Skip if no path has been configured. + if d.tokensPath == "" { + level.Info(d.logger).Log("msg", "not loading tokens from file, tokens file path is empty") + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) + } + + // Do not load tokens from disk if the instance is already in the ring and has some tokens. + if instanceExists && len(instanceDesc.GetTokens()) > 0 { + level.Info(d.logger).Log("msg", "not loading tokens from file, instance already in the ring") + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) + } + + tokensFromFile, err := LoadTokensFromFile(d.tokensPath) + if err != nil { + level.Error(d.logger).Log("msg", "error in getting tokens from file", "err", err) + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) + } + + // Signal the next delegate that the tokens have been loaded, mimicking the + // case where the instance exists in the ring (which is OK because the lifecycler + // will correctly reconcile this case too). + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), IngesterDesc{ + Addr: lifecycler.GetInstanceAddr(), + Timestamp: time.Now().Unix(), + State: d.loadState, + Tokens: tokensFromFile, + Zone: lifecycler.GetInstanceZone(), + }) +} + +func (d *TokensPersistencyDelegate) OnRingInstanceTokens(lifecycler *BasicLifecycler, tokens Tokens) { + if d.tokensPath != "" { + if err := tokens.StoreToFile(d.tokensPath); err != nil { + level.Error(d.logger).Log("msg", "error storing tokens to disk", "path", d.tokensPath, "err", err) + } + } + + d.next.OnRingInstanceTokens(lifecycler, tokens) +} + +func (d *TokensPersistencyDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler) { + d.next.OnRingInstanceStopping(lifecycler) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_metrics.go new file mode 100644 index 000000000000..d23e94023369 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_metrics.go @@ -0,0 +1,32 @@ +package ring + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type BasicLifecyclerMetrics struct { + heartbeats prometheus.Counter + tokensOwned prometheus.Gauge + tokensToOwn prometheus.Gauge +} + +func NewBasicLifecyclerMetrics(ringName string, reg prometheus.Registerer) *BasicLifecyclerMetrics { + return &BasicLifecyclerMetrics{ + heartbeats: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ring_member_heartbeats_total", + Help: "The total number of heartbeats sent.", + ConstLabels: prometheus.Labels{"name": ringName}, + }), + tokensOwned: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ +
Name: "cortex_ring_member_tokens_owned", + Help: "The number of tokens owned in the ring.", + ConstLabels: prometheus.Labels{"name": ringName}, + }), + tokensToOwn: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ring_member_tokens_to_own", + Help: "The number of tokens to own in the ring.", + ConstLabels: prometheus.Labels{"name": ringName}, + }), + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go similarity index 58% rename from vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go rename to vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go index 9468ebf6f08c..bc1931a2eca6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go @@ -2,7 +2,6 @@ package client import ( "context" - "flag" "fmt" "io" "sync" @@ -11,7 +10,6 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/user" "google.golang.org/grpc/health/grpc_health_v1" @@ -20,65 +18,64 @@ import ( "github.com/cortexproject/cortex/pkg/util/services" ) -var clients = promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "distributor_ingester_clients", - Help: "The current number of ingester clients.", -}) +// PoolClient is the interface that should be implemented by a +// client managed by the pool. +type PoolClient interface { + grpc_health_v1.HealthClient + io.Closer +} -// Factory defines the signature for an ingester client factory. -type Factory func(addr string) (grpc_health_v1.HealthClient, error) +// PoolFactory defines the signature for a client factory. +type PoolFactory func(addr string) (PoolClient, error) // PoolConfig is config for creating a Pool. type PoolConfig struct { - ClientCleanupPeriod time.Duration `yaml:"client_cleanup_period"` - HealthCheckIngesters bool `yaml:"health_check_ingesters"` - RemoteTimeout time.Duration `yaml:"-"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *PoolConfig) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.ClientCleanupPeriod, "distributor.client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.") - f.BoolVar(&cfg.HealthCheckIngesters, "distributor.health-check-ingesters", true, "Run a health check on each ingester client during periodic cleanup.") + CheckInterval time.Duration + HealthCheckEnabled bool + HealthCheckTimeout time.Duration } // Pool holds a cache of grpc_health_v1 clients. type Pool struct { services.Service - cfg PoolConfig - ring ring.ReadRing - factory Factory - logger log.Logger + cfg PoolConfig + ring ring.ReadRing + factory PoolFactory + logger log.Logger + clientName string sync.RWMutex - clients map[string]grpc_health_v1.HealthClient + clients map[string]PoolClient + + clientsMetric prometheus.Gauge } // NewPool creates a new Pool. 
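// A construction sketch (names and values illustrative, not part of this change):
//
//	pool := client.NewPool("ingester", client.PoolConfig{
//		CheckInterval:      15 * time.Second,
//		HealthCheckEnabled: true,
//		HealthCheckTimeout: 1 * time.Second,
//	}, ingestersRing, factory, clientsGauge, logger)
//
// clientsGauge may be nil when the caller doesn't need a client-count metric.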
-func NewPool(cfg PoolConfig, ring ring.ReadRing, factory Factory, logger log.Logger) *Pool { +func NewPool(clientName string, cfg PoolConfig, ring ring.ReadRing, factory PoolFactory, clientsMetric prometheus.Gauge, logger log.Logger) *Pool { p := &Pool{ - cfg: cfg, - ring: ring, - factory: factory, - logger: logger, - - clients: map[string]grpc_health_v1.HealthClient{}, + cfg: cfg, + ring: ring, + factory: factory, + logger: logger, + clientName: clientName, + clients: map[string]PoolClient{}, + clientsMetric: clientsMetric, } - p.Service = services.NewTimerService(cfg.ClientCleanupPeriod, nil, p.iteration, nil) + p.Service = services.NewTimerService(cfg.CheckInterval, nil, p.iteration, nil) return p } func (p *Pool) iteration(ctx context.Context) error { p.removeStaleClients() - if p.cfg.HealthCheckIngesters { + if p.cfg.HealthCheckEnabled { p.cleanUnhealthy() } return nil } -func (p *Pool) fromCache(addr string) (grpc_health_v1.HealthClient, bool) { +func (p *Pool) fromCache(addr string) (PoolClient, bool) { p.RLock() defer p.RUnlock() client, ok := p.clients[addr] @@ -87,7 +84,7 @@ func (p *Pool) fromCache(addr string) (grpc_health_v1.HealthClient, bool) { // GetClientFor gets the client for the specified address. If it does not exist it will make a new client // at that address -func (p *Pool) GetClientFor(addr string) (grpc_health_v1.HealthClient, error) { +func (p *Pool) GetClientFor(addr string) (PoolClient, error) { client, ok := p.fromCache(addr) if ok { return client, nil @@ -105,7 +102,9 @@ func (p *Pool) GetClientFor(addr string) (grpc_health_v1.HealthClient, error) { return nil, err } p.clients[addr] = client - clients.Add(1) + if p.clientsMetric != nil { + p.clientsMetric.Add(1) + } return client, nil } @@ -116,13 +115,15 @@ func (p *Pool) RemoveClientFor(addr string) { client, ok := p.clients[addr] if ok { delete(p.clients, addr) - clients.Add(-1) + if p.clientsMetric != nil { + p.clientsMetric.Add(-1) + } // Close in the background since this operation may take awhile and we have a mutex - go func(addr string, closer io.Closer) { + go func(addr string, closer PoolClient) { if err := closer.Close(); err != nil { - level.Error(p.logger).Log("msg", "error closing connection to ingester", "ingester", addr, "err", err) + level.Error(p.logger).Log("msg", fmt.Sprintf("error closing connection to %s", p.clientName), "addr", addr, "err", err) } - }(addr, client.(io.Closer)) + }(addr, client) } } @@ -165,15 +166,15 @@ func (p *Pool) removeStaleClients() { } } -// cleanUnhealthy loops through all ingesters and deletes any that fails a healthcheck. +// cleanUnhealthy loops through all servers and deletes any that fails a healthcheck. 
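+// For that check to work, the PoolFactory must produce clients implementing the
+// gRPC health service as well as io.Closer. A hypothetical factory sketch:
+//
+//	factory := func(addr string) (client.PoolClient, error) {
+//		conn, err := grpc.Dial(addr, grpc.WithInsecure())
+//		if err != nil {
+//			return nil, err
+//		}
+//		return struct {
+//			grpc_health_v1.HealthClient
+//			io.Closer
+//		}{grpc_health_v1.NewHealthClient(conn), conn}, nil
+//	}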
func (p *Pool) cleanUnhealthy() { for _, addr := range p.RegisteredAddresses() { client, ok := p.fromCache(addr) // not ok means someone removed a client between the start of this loop and now if ok { - err := healthCheck(client, p.cfg.RemoteTimeout) + err := healthCheck(client, p.cfg.HealthCheckTimeout) if err != nil { - level.Warn(util.Logger).Log("msg", "removing ingester failing healthcheck", "addr", addr, "reason", err) + level.Warn(util.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) p.RemoveClientFor(addr) } } @@ -181,7 +182,7 @@ } // healthCheck will check if the client is still healthy, returning an error if it is not -func healthCheck(client grpc_health_v1.HealthClient, timeout time.Duration) error { +func healthCheck(client PoolClient, timeout time.Duration) error { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() ctx = user.InjectOrgID(ctx, "0") @@ -191,7 +192,7 @@ func healthCheck(client grpc_health_v1.HealthClient, timeout time.Duration) erro return err } if resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { - return fmt.Errorf("Failing healthcheck status: %s", resp.Status) + return fmt.Errorf("failing healthcheck status: %s", resp.Status) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go index f18fb455d716..9b5c9188e034 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go @@ -67,9 +67,18 @@ func (cfg *Config) RegisterFlagsWithPrefix(flagsPrefix, defaultPrefix string, f // It also deals with serialisation by using a Codec and having an instance of // the desired type passed in to methods a la json.Unmarshal. type Client interface { + // List returns a list of keys under the given prefix. Returned keys will + // include the prefix. + List(ctx context.Context, prefix string) ([]string, error) + // Get a specific key. Will use a codec to deserialise key to appropriate type. + // If the key does not exist, Get will return nil and no error. Get(ctx context.Context, key string) (interface{}, error) + // Delete a specific key. Deletions are best-effort and no error will + // be returned if the key does not exist. + Delete(ctx context.Context, key string) error + // CAS stands for Compare-And-Swap. Will call provided callback f with the // current value of the key and allow callback to return a different value. // Will then attempt to atomically swap the current value for the new value.
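// A typical CAS callback (an illustrative sketch) mutates the decoded value and
// returns it for storage, e.g. to remove an instance from a ring:
//
//	err := store.CAS(ctx, ringKey, func(in interface{}) (interface{}, bool, error) {
//		desc := ring.GetOrCreateRingDesc(in)
//		desc.RemoveIngester(instanceID)
//		return desc, true, nil
//	})
//
// Since the swap may conflict and the callback may be invoked again, it should
// be free of side effects.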
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go index 3fdc24c54364..aa5cc7f260a5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go @@ -47,6 +47,7 @@ type kv interface { CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) List(path string, q *consul.QueryOptions) (consul.KVPairs, *consul.QueryMeta, error) + Delete(key string, q *consul.WriteOptions) (*consul.WriteMeta, error) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMeta, error) } @@ -283,6 +284,24 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, } } +// List implements kv.List. +func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { + options := &consul.QueryOptions{ + AllowStale: !c.cfg.ConsistentReads, + RequireConsistent: c.cfg.ConsistentReads, + } + pairs, _, err := c.kv.List(prefix, options.WithContext(ctx)) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(pairs)) + for _, kvp := range pairs { + keys = append(keys, kvp.Key) + } + return keys, nil +} + // Get implements kv.Get. func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { options := &consul.QueryOptions{ @@ -298,6 +317,12 @@ func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { return c.codec.Decode(kvp.Value) } +// Delete implements kv.Delete. +func (c *Client) Delete(ctx context.Context, key string) error { + _, err := c.kv.Delete(key, writeOptions.WithContext(ctx)) + return err +} + func checkLastIndex(index, metaLastIndex uint64) (newIndex uint64, skip bool) { // See https://www.consul.io/api/features/blocking.html#implementation-details for logic behind these checks. 
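// In short, per the linked Consul documentation: indexes are opaque, a watch
// should be reset when the index is zero or goes backwards, and a result whose
// index is unchanged carries no new data and can be skipped.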
if metaLastIndex == 0 { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/metrics.go index ba1a5606b1ae..99910a16a33e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/metrics.go @@ -59,6 +59,17 @@ func (c consulMetrics) List(path string, options *consul.QueryOptions) (consul.K return kvps, meta, err } +func (c consulMetrics) Delete(key string, options *consul.WriteOptions) (*consul.WriteMeta, error) { + var meta *consul.WriteMeta + err := instrument.CollectedRequest(options.Context(), "Delete", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + options = options.WithContext(ctx) + var err error + meta, err = c.kv.Delete(key, options) + return err + }) + return meta, err +} + func (c consulMetrics) Put(p *consul.KVPair, options *consul.WriteOptions) (*consul.WriteMeta, error) { var result *consul.WriteMeta err := instrument.CollectedRequest(options.Context(), "Put", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go index 03a0f90091dd..708bea76205f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go @@ -164,17 +164,19 @@ func (m *mockKV) List(prefix string, q *consul.QueryOptions) (consul.KVPairs, *c m.mtx.Lock() defer m.mtx.Unlock() - deadline := time.Now().Add(mockedMaxWaitTime(q.WaitTime)) - if ctxDeadline, ok := q.Context().Deadline(); ok && ctxDeadline.Before(deadline) { - // respect deadline from context, if set. - deadline = ctxDeadline - } + if q.WaitTime > 0 { + deadline := time.Now().Add(mockedMaxWaitTime(q.WaitTime)) + if ctxDeadline, ok := q.Context().Deadline(); ok && ctxDeadline.Before(deadline) { + // respect deadline from context, if set. + deadline = ctxDeadline + } - for q.WaitIndex >= m.current && time.Now().Before(deadline) { - m.cond.Wait() - } - if time.Now().After(deadline) { - return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil + for q.WaitIndex >= m.current && time.Now().Before(deadline) { + m.cond.Wait() + } + if time.Now().After(deadline) { + return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil + } } result := consul.KVPairs{} @@ -187,6 +189,13 @@ func (m *mockKV) List(prefix string, q *consul.QueryOptions) (consul.KVPairs, *c return result, &consul.QueryMeta{LastIndex: m.current}, nil } +func (m *mockKV) Delete(key string, q *consul.WriteOptions) (*consul.WriteMeta, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + delete(m.kvps, key) + return nil, nil +} + func (m *mockKV) ResetIndex() { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go index 3a75b197cf46..ea938616239d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go @@ -212,14 +212,35 @@ outer: } } +// List implements kv.Client. 
+func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { + resp, err := c.cli.Get(ctx, prefix, clientv3.WithPrefix()) + if err != nil { + return nil, err + } + keys := make([]string, 0, len(resp.Kvs)) + for _, kv := range resp.Kvs { + keys = append(keys, string(kv.Key)) + } + return keys, nil +} + // Get implements kv.Client. func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { resp, err := c.cli.Get(ctx, key) if err != nil { return nil, err } - if len(resp.Kvs) != 1 { - return nil, fmt.Errorf("got %d kvs, expected 1", len(resp.Kvs)) + if len(resp.Kvs) == 0 { + return nil, nil + } else if len(resp.Kvs) != 1 { + return nil, fmt.Errorf("got %d kvs, expected 1 or 0", len(resp.Kvs)) } return c.codec.Decode(resp.Kvs[0].Value) } + +// Delete implements kv.Client. +func (c *Client) Delete(ctx context.Context, key string) error { + _, err := c.cli.Delete(ctx, key) + return err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index e6e56f7c39ca..66e0f9b2ce7c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -46,11 +46,21 @@ func NewClient(kv *KV, codec codec.Codec) (*Client, error) { }, nil } +// List is part of kv.Client interface. +func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { + return c.kv.List(prefix), nil +} + // Get is part of kv.Client interface. func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { return c.kv.Get(key, c.codec) } +// Delete is part of kv.Client interface. +func (c *Client) Delete(ctx context.Context, key string) error { + return errors.New("memberlist does not support Delete") +} + // CAS is part of kv.Client interface func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { return c.kv.CAS(ctx, key, c.codec, f) @@ -340,6 +350,21 @@ func (m *KV) Stop() { } } +// List returns all known keys under a given prefix. +// No communication with other nodes in the cluster is done here. +func (m *KV) List(prefix string) []string { + m.storeMu.Lock() + defer m.storeMu.Unlock() + + var keys []string + for k := range m.store { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + // Get returns current value associated with given key. // No communication with other nodes in the cluster is done here. 
func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go index c1f260dd1853..1e905c935510 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go @@ -35,6 +35,16 @@ type metrics struct { c Client } +func (m metrics) List(ctx context.Context, prefix string) ([]string, error) { + var result []string + err := instrument.CollectedRequest(ctx, "List", requestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + result, err = m.c.List(ctx, prefix) + return err + }) + return result, err +} + func (m metrics) Get(ctx context.Context, key string) (interface{}, error) { var result interface{} err := instrument.CollectedRequest(ctx, "GET", requestDuration, instrument.ErrorCode, func(ctx context.Context) error { @@ -45,6 +55,13 @@ func (m metrics) Get(ctx context.Context, key string) (interface{}, error) { return result, err } +func (m metrics) Delete(ctx context.Context, key string) error { + err := instrument.CollectedRequest(ctx, "Delete", requestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return m.c.Delete(ctx, key) + }) + return err +} + func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { return instrument.CollectedRequest(ctx, "CAS", requestDuration, errorCode, func(ctx context.Context) error { return m.c.CAS(ctx, key, f) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go index fbbed83f794c..3817725fe3a3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go @@ -283,11 +283,22 @@ func (m *MultiClient) runWithPrimaryClient(origCtx context.Context, fn func(newC } } +// List is a part of the kv.Client interface. +func (m *MultiClient) List(ctx context.Context, prefix string) ([]string, error) { + _, kv := m.getPrimaryClient() + return kv.client.List(ctx, prefix) +} + // Get is a part of kv.Client interface. func (m *MultiClient) Get(ctx context.Context, key string) (interface{}, error) { _, kv := m.getPrimaryClient() - val, err := kv.client.Get(ctx, key) - return val, err + return kv.client.Get(ctx, key) +} + +// Delete is a part of the kv.Client interface. +func (m *MultiClient) Delete(ctx context.Context, key string) error { + _, kv := m.getPrimaryClient() + return kv.client.Delete(ctx, key) } // CAS is a part of kv.Client interface. diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go index d5c21d2c8569..5775cf17b947 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go @@ -16,6 +16,24 @@ func PrefixClient(client Client, prefix string) Client { return &prefixedKVClient{prefix, client} } +// List returns a list of keys under a given prefix. +func (c *prefixedKVClient) List(ctx context.Context, prefix string) ([]string, error) { + keys, err := c.client.List(ctx, c.prefix+prefix) + if err != nil { + return nil, err + } + + // Remove the prefix from the returned key. 
The prefix attached to the + // prefixed client is supposed to be transparent and the values returned + // by List should be able to be immediately inserted into the Get + // function, which means that our injected prefix needs to be removed. + for i := range keys { + keys[i] = strings.TrimPrefix(keys[i], c.prefix) + } + + return keys, nil +} + // CAS atomically modifies a value in a callback. If the value doesn't exist, // you'll get 'nil' as an argument to your callback. func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { @@ -34,6 +52,12 @@ func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f fun }) } +// Get looks up a given object from its key. func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) { return c.client.Get(ctx, c.prefix+key) } + +// Delete removes a given object from its key. +func (c *prefixedKVClient) Delete(ctx context.Context, key string) error { + return c.client.Delete(ctx, c.prefix+key) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index 1b90f5f151f9..1aa0f7aadf53 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -98,7 +98,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag f.StringVar(&cfg.Addr, prefix+"lifecycler.addr", "", "IP address to advertise in consul.") f.IntVar(&cfg.Port, prefix+"lifecycler.port", 0, "port to advertise in consul (defaults to server.grpc-listen-port).") f.StringVar(&cfg.ID, prefix+"lifecycler.ID", hostname, "ID to register into consul.") - f.StringVar(&cfg.Zone, prefix+"availability-zone", "", "The availability zone of the host, this instance is running on. Default is the lifecycler ID.") + f.StringVar(&cfg.Zone, prefix+"availability-zone", "", "The availability zone of the host, this instance is running on. Default is an empty string, which disables zone awareness for writes.") } // Lifecycler is responsible for managing the lifecycle of entries in the ring. @@ -139,18 +139,11 @@ type Lifecycler struct { // NewLifecycler creates new Lifecycler. It must be started via StartAsync. 
func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringName, ringKey string, flushOnShutdown bool) (*Lifecycler, error) { - addr := cfg.Addr - if addr == "" { - var err error - addr, err = util.GetFirstAddressOf(cfg.InfNames) - if err != nil { - return nil, err - } - } - port := cfg.Port - if port == 0 { - port = *cfg.ListenPort + addr, err := GetInstanceAddr(cfg.Addr, cfg.InfNames) + if err != nil { + return nil, err } + port := GetInstancePort(cfg.Port, *cfg.ListenPort) codec := GetCodec() store, err := kv.NewClient(cfg.RingConfig.KVStore, codec) if err != nil { @@ -162,10 +155,6 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa util.WarnExperimentalUse("Zone aware replication") } - if zone == "" { - zone = cfg.ID - } - // We do allow a nil FlushTransferer, but to keep the ring logic easier we assume // it's always set, so we use a noop FlushTransferer if flushTransferer == nil { @@ -667,6 +656,7 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { ingesterDesc.Timestamp = time.Now().Unix() ingesterDesc.State = i.GetState() ingesterDesc.Addr = i.Addr + ingesterDesc.Zone = i.Zone ringDesc.Ingesters[i.ID] = ingesterDesc } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index f13965b4e7e8..a7de9df70f2d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -37,7 +37,7 @@ func NewDesc() *Desc { // AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens, // any other tokens are removed. -func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState) { +func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState) IngesterDesc { if d.Ingesters == nil { d.Ingesters = map[string]IngesterDesc{} } @@ -51,6 +51,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Ingeste } d.Ingesters[id] = ingester + return ingester } // RemoveIngester removes the given ingester and all its tokens. @@ -125,13 +126,19 @@ func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration) b switch op { case Write: - healthy = (i.State == ACTIVE) + healthy = i.State == ACTIVE case Read: healthy = (i.State == ACTIVE) || (i.State == LEAVING) || (i.State == PENDING) case Reporting: healthy = true + + case BlocksSync: + healthy = (i.State == JOINING) || (i.State == ACTIVE) || (i.State == LEAVING) + + case BlocksRead: + healthy = i.State == ACTIVE } return healthy && time.Since(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout @@ -381,7 +388,7 @@ type TokenDesc struct { Zone string } -// Returns sorted list of tokens with ingester names. +// getTokens returns sorted list of tokens with ingester IDs, owned by each ingester in the ring. func (d *Desc) getTokens() []TokenDesc { numTokens := 0 for _, ing := range d.Ingesters { @@ -397,3 +404,27 @@ func (d *Desc) getTokens() []TokenDesc { sort.Sort(ByToken(tokens)) return tokens } + +// TokenDescs holds a sorted list of TokenDesc. 
+type TokenDescs []TokenDesc + +func (t TokenDescs) Equals(other TokenDescs) bool { + if len(t) != len(other) { + return false + } + + for i := 0; i < len(t); i++ { + if (t[i].Token != other[i].Token) || (t[i].Ingester != other[i].Ingester) || (t[i].Zone != other[i].Zone) { + return false + } + } + + return true +} + +func GetOrCreateRingDesc(d interface{}) *Desc { + if d == nil { + return NewDesc() + } + return d.(*Desc) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go index 3c36307c2431..8717fd8cdcda 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go @@ -74,3 +74,14 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(*Ing return results, nil } + +// Includes returns whether the replication set includes the replica with the provided addr. +func (r ReplicationSet) Includes(addr string) bool { + for _, instance := range r.Ingesters { + if instance.GetAddr() == addr { + return true + } + } + + return false +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index a4de06289960..9231b8d8fd3b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -2,22 +2,37 @@ package ring import ( "fmt" + "time" ) -// replicationStrategy decides, given the set of ingesters eligible for a key, +type ReplicationStrategy interface { + // Filter filters out unhealthy instances and checks if there are enough instances + // for an operation to succeed. Returns an error if there are not enough + // instances. + Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration) (healthy []IngesterDesc, maxFailures int, err error) + + // ShouldExtendReplicaSet returns true if, given an instance that's going to be + // added to the replica set, the replica set size should be extended by 1 + // more instance for the given operation. + ShouldExtendReplicaSet(instance IngesterDesc, op Operation) bool +} + +type DefaultReplicationStrategy struct{} + +// Filter decides, given the set of ingesters eligible for a key, // which ingesters you will try and write to and how many failures you will // tolerate. // - Filters out dead ingesters so one doesn't even try to write to them. // - Checks there are enough ingesters for an operation to succeed. // The ingesters argument may be overwritten. -func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ([]IngesterDesc, int, error) { +func (s *DefaultReplicationStrategy) Filter(ingesters []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration) ([]IngesterDesc, int, error) { // We need a response from a quorum of ingesters, which is n/2 + 1. In the // case of a node joining/leaving, the actual replica set might be bigger // than the replication factor, so use the bigger of the two.
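// Worked example (illustrative, not from the source): with ReplicationFactor = 3
// and one extra instance included because an ingester is JOINING, len(ingesters)
// is 4, so replicationFactor becomes 4, minSuccess = 4/2+1 = 3 and maxFailure = 1.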
- replicationFactor := r.cfg.ReplicationFactor if len(ingesters) > replicationFactor { replicationFactor = len(ingesters) } + minSuccess := (replicationFactor / 2) + 1 maxFailure := replicationFactor - minSuccess @@ -25,7 +40,7 @@ func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ([]In // included in the calculation of minSuccess, so if too many failed ingesters // will cause the whole write to fail. for i := 0; i < len(ingesters); { - if r.IsHealthy(&ingesters[i], op) { + if ingesters[i].IsHealthy(op, heartbeatTimeout) { i++ } else { ingesters = append(ingesters[:i], ingesters[i+1:]...) @@ -44,6 +59,22 @@ func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ([]In return ingesters, maxFailure, nil } +func (s *DefaultReplicationStrategy) ShouldExtendReplicaSet(ingester IngesterDesc, op Operation) bool { + // We do not want to Write to Ingesters that are not ACTIVE, but we do want + // to write the extra replica somewhere. So we increase the size of the set + // of replicas for the key. This means we have to also increase the + // size of the replica set for read, but we can read from Leaving ingesters, + // so don't skip it in this case. + // NB dead ingester will be filtered later by DefaultReplicationStrategy.Filter(). + if op == Write && ingester.State != ACTIVE { + return true + } else if op == Read && (ingester.State != ACTIVE && ingester.State != LEAVING) { + return true + } + + return false +} + // IsHealthy checks whether an ingester appears to be alive and heartbeating func (r *Ring) IsHealthy(ingester *IngesterDesc, op Operation) bool { return ingester.IsHealthy(op, r.cfg.HeartbeatTimeout) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index ec9243688e89..33751d1b9b4d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -34,9 +34,6 @@ const ( // CompactorRingKey is the key under which we store the compactors ring in the KVStore. CompactorRingKey = "compactor" - - // StoreGatewayRingKey is the key under which we store the store gateways ring in the KVStore. - StoreGatewayRingKey = "store-gateway" ) // ReadRing represents the read interface to the ring. @@ -61,10 +58,22 @@ const ( Read Operation = iota Write Reporting // Special value for inquiring about health + + // BlocksSync is the operation run by the store-gateway to sync blocks. + BlocksSync + + // BlocksRead is the operation run by the querier to query blocks via the store-gateway. + BlocksRead ) -// ErrEmptyRing is the error returned when trying to get an element when nothing has been added to hash. -var ErrEmptyRing = errors.New("empty ring") +var ( + // ErrEmptyRing is the error returned when trying to get an element when nothing has been added to hash. + ErrEmptyRing = errors.New("empty ring") + + // ErrInstanceNotFound is the error returned when trying to get information for an instance + // not registered within the ring. + ErrInstanceNotFound = errors.New("instance not found in the ring") +) // Config for a Ring type Config struct { @@ -90,10 +99,10 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { type Ring struct { services.Service - name string key string cfg Config KVClient kv.Client + strategy ReplicationStrategy mtx sync.RWMutex ringDesc *Desc @@ -108,45 +117,55 @@ type Ring struct { // New creates a new Ring. Being a service, Ring needs to be started to do anything. 
func New(cfg Config, name, key string) (*Ring, error) { - if cfg.ReplicationFactor <= 0 { - return nil, fmt.Errorf("ReplicationFactor must be greater than zero: %d", cfg.ReplicationFactor) - } codec := GetCodec() store, err := kv.NewClient(cfg.KVStore, codec) if err != nil { return nil, err } + return NewWithStoreClientAndStrategy(cfg, name, key, store, &DefaultReplicationStrategy{}) +} + +func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client, strategy ReplicationStrategy) (*Ring, error) { + if cfg.ReplicationFactor <= 0 { + return nil, fmt.Errorf("ReplicationFactor must be greater than zero: %d", cfg.ReplicationFactor) + } + r := &Ring{ - name: name, key: key, cfg: cfg, KVClient: store, + strategy: strategy, ringDesc: &Desc{}, memberOwnershipDesc: prometheus.NewDesc( "cortex_ring_member_ownership_percent", "The percent ownership of the ring by member", - []string{"member", "name"}, nil, + []string{"member"}, + map[string]string{"name": name}, ), numMembersDesc: prometheus.NewDesc( "cortex_ring_members", "Number of members in the ring", - []string{"state", "name"}, nil, + []string{"state"}, + map[string]string{"name": name}, ), totalTokensDesc: prometheus.NewDesc( "cortex_ring_tokens_total", "Number of tokens in the ring", - []string{"name"}, nil, + nil, + map[string]string{"name": name}, ), numTokensDesc: prometheus.NewDesc( "cortex_ring_tokens_owned", "The number of tokens in the ring owned by the member", - []string{"member", "name"}, nil, + []string{"member"}, + map[string]string{"name": name}, ), oldestTimestampDesc: prometheus.NewDesc( "cortex_ring_oldest_member_timestamp", "Timestamp of the oldest member in the ring.", - []string{"state", "name"}, nil, + []string{"state"}, + map[string]string{"name": name}, ), } @@ -208,22 +227,16 @@ func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet distinctHosts[token.Ingester] = struct{}{} ingester := r.ringDesc.Ingesters[token.Ingester] - // We do not want to Write to Ingesters that are not ACTIVE, but we do want - // to write the extra replica somewhere. So we increase the size of the set - // of replicas for the key. This means we have to also increase the - // size of the replica set for read, but we can read from Leaving ingesters, - // so don't skip it in this case. - // NB dead ingester will be filtered later (by replication_strategy.go). - if op == Write && ingester.State != ACTIVE { - n++ - } else if op == Read && (ingester.State != ACTIVE && ingester.State != LEAVING) { + // Check whether the replica set should be extended given we're including + // this instance. + if r.strategy.ShouldExtendReplicaSet(ingester, op) { n++ } ingesters = append(ingesters, ingester) } - liveIngesters, maxFailure, err := r.replicationStrategy(ingesters, op) + liveIngesters, maxFailure, err := r.strategy.Filter(ingesters, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout) if err != nil { return ReplicationSet{}, err } @@ -264,6 +277,37 @@ func (r *Ring) GetAll() (ReplicationSet, error) { }, nil } +// GetAllTokens returns all ring tokens of healthy instances for the given operation. 
+func (r *Ring) GetAllTokens(op Operation) TokenDescs { + r.mtx.RLock() + defer r.mtx.RUnlock() + + all := make([]TokenDesc, 0, len(r.ringTokens)) + cache := map[string]bool{} + + for _, token := range r.ringTokens { + healthy, ok := cache[token.Ingester] + if !ok { + if instance, exists := r.ringDesc.Ingesters[token.Ingester]; exists { + healthy = r.IsHealthy(&instance, op) + } else { + // Shouldn't ever happen unless there's a bug, but in that case we consider it unhealthy. + healthy = false + } + + cache[token.Ingester] = healthy + } + + if healthy { + // Given ringTokens is sorted and we iterate it in order, we can simply + // append to the result while keeping ordering. + all = append(all, token) + } + } + + return all +} + func (r *Ring) search(key uint32) int { i := sort.Search(len(r.ringTokens), func(x int) bool { return r.ringTokens[x].Token > key @@ -279,6 +323,7 @@ func (r *Ring) Describe(ch chan<- *prometheus.Desc) { ch <- r.memberOwnershipDesc ch <- r.numMembersDesc ch <- r.totalTokensDesc + ch <- r.oldestTimestampDesc ch <- r.numTokensDesc } @@ -318,14 +363,12 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { prometheus.GaugeValue, float64(totalOwned)/float64(math.MaxUint32), id, - r.name, ) ch <- prometheus.MustNewConstMetric( r.numTokensDesc, prometheus.GaugeValue, float64(numTokens[id]), id, - r.name, ) } @@ -355,7 +398,6 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { prometheus.GaugeValue, float64(count), state, - r.name, ) } for state, timestamp := range oldestTimestampByState { @@ -364,7 +406,6 @@ prometheus.GaugeValue, float64(timestamp), state, - r.name, ) } @@ -372,7 +413,6 @@ r.totalTokensDesc, prometheus.GaugeValue, float64(len(r.ringTokens)), - r.name, ) } @@ -423,8 +463,8 @@ func (r *Ring) Subring(key uint32, n int) (ReadRing, error) { } sub := &Ring{ - name: "subring", - cfg: r.cfg, + cfg: r.cfg, + strategy: r.strategy, ringDesc: &Desc{ Ingesters: ingesters, }, @@ -440,3 +480,18 @@ func (r *Ring) Subring(key uint32, n int) (ReadRing, error) { return sub, nil } + +// GetInstanceState returns the current state of an instance or an error if the +// instance does not exist in the ring. +func (r *Ring) GetInstanceState(instanceID string) (IngesterState, error) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + instances := r.ringDesc.GetIngesters() + instance, ok := instances[instanceID] + if !ok { + return PENDING, ErrInstanceNotFound + } + + return instance.GetState(), nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go b/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go index 729b3a9c3675..3175008aaf1d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go @@ -5,6 +5,7 @@ import ( "errors" "io/ioutil" "os" + "sort" ) // Tokens is a simple list of tokens. @@ -14,6 +15,25 @@ func (t Tokens) Len() int { return len(t) } func (t Tokens) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func (t Tokens) Less(i, j int) bool { return t[i] < t[j] } +// Equals returns whether the tokens are equal to the input ones. +func (t Tokens) Equals(other Tokens) bool { + if len(t) != len(other) { + return false + } + + mine := t + sort.Sort(mine) + sort.Sort(other) + + for i := 0; i < len(mine); i++ { + if mine[i] != other[i] { + return false + } + } + + return true +} + // StoreToFile stores the tokens in the given directory.
func (t Tokens) StoreToFile(tokenFilePath string) error { if tokenFilePath == "" { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go index e30166fa7bc1..cf9fea3ed7fa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go @@ -1,13 +1,20 @@ package ring import ( + "context" "math/rand" "time" + + "github.com/cortexproject/cortex/pkg/util" ) // GenerateTokens make numTokens unique random tokens, none of which clash // with takenTokens. func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 { + if numTokens <= 0 { + return []uint32{} + } + r := rand.New(rand.NewSource(time.Now().UnixNano())) used := make(map[uint32]bool) @@ -25,5 +32,51 @@ func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 { tokens = append(tokens, candidate) i++ } + return tokens } + +// GetInstanceAddr returns the address to use to register the instance +// in the ring. +func GetInstanceAddr(configAddr string, netInterfaces []string) (string, error) { + if configAddr != "" { + return configAddr, nil + } + + addr, err := util.GetFirstAddressOf(netInterfaces) + if err != nil { + return "", err + } + + return addr, nil +} + +// GetInstancePort returns the port to use to register the instance +// in the ring. +func GetInstancePort(configPort, listenPort int) int { + if configPort > 0 { + return configPort + } + + return listenPort +} + +// WaitInstanceState waits until the input instanceID is registered within the +// ring matching the provided state. A timeout should be provided within the context. +func WaitInstanceState(ctx context.Context, r *Ring, instanceID string, state IngesterState) error { + backoff := util.NewBackoff(ctx, util.BackoffConfig{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: time.Second, + MaxRetries: 0, + }) + + for backoff.Ongoing() { + if actualState, err := r.GetInstanceState(instanceID); err == nil && actualState == state { + return nil + } + + backoff.Wait() + } + + return backoff.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go index 2101dfec5539..36a92a0e86fb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -22,7 +22,12 @@ type Config struct { } // RegisterFlags registers flags. -func (cfg *Config) RegisterFlags(prefix string, f *flag.FlagSet) { +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix registers flags with prefix. 
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") f.BoolVar(&cfg.UseGzipCompression, prefix+".grpc-use-gzip-compression", false, "Use compression when sending messages.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go new file mode 100644 index 000000000000..b80101af7701 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go @@ -0,0 +1,23 @@ +package grpcclient + +import ( + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/middleware" + "google.golang.org/grpc" + + cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" +) + +func Instrument(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { + return []grpc.UnaryClientInterceptor{ + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor, + cortex_middleware.PrometheusGRPCUnaryInstrumentation(requestDuration), + }, []grpc.StreamClientInterceptor{ + otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), + middleware.StreamClientUserHeaderInterceptor, + cortex_middleware.PrometheusGRPCStreamInstrumentation(requestDuration), + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index 537d3cfd9475..0dabf741fc54 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -142,12 +142,16 @@ func BuildMetricFamiliesPerUserFromUserRegistries(regs map[string]*prometheus.Re return data } -func (d MetricFamiliesPerUser) SendSumOfCounters(out chan<- prometheus.Metric, desc *prometheus.Desc, counter string) { +func (d MetricFamiliesPerUser) GetSumOfCounters(counter string) float64 { result := float64(0) for _, userMetrics := range d { result += userMetrics.SumCounters(counter) } - out <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, result) + return result +} + +func (d MetricFamiliesPerUser) SendSumOfCounters(out chan<- prometheus.Metric, desc *prometheus.Desc, counter string) { + out <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, d.GetSumOfCounters(counter)) } func (d MetricFamiliesPerUser) SendSumOfCountersWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, counter string, labelNames ...string) { @@ -162,12 +166,16 @@ func (d MetricFamiliesPerUser) SendSumOfCountersPerUser(out chan<- prometheus.Me } } -func (d MetricFamiliesPerUser) SendSumOfGauges(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string) { +func (d MetricFamiliesPerUser) GetSumOfGauges(gauge string) float64 { result := float64(0) for _, userMetrics := range d { result += userMetrics.SumGauges(gauge) } - out <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, result) + return result +} + +func (d MetricFamiliesPerUser) SendSumOfGauges(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string) { + out <- prometheus.MustNewConstMetric(desc, 
prometheus.GaugeValue, d.GetSumOfGauges(gauge)) } func (d MetricFamiliesPerUser) SendSumOfGaugesWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string, labelNames ...string) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/services/README.md b/vendor/github.com/cortexproject/cortex/pkg/util/services/README.md index c9f3cc9483db..084deefea124 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/services/README.md +++ b/vendor/github.com/cortexproject/cortex/pkg/util/services/README.md @@ -2,13 +2,13 @@ This is a Go implementation of [services model](https://github.com/google/guava/wiki/ServiceExplained) from [Google Guava](https://github.com/google/guava) library. -It provides `Service` interface (with implementation in `BasicService` type) and `Manager` for managing group of services at once. +It provides `Service` interface (with implementation in `BasicService` type) and `Manager` for managing group of services at once. Main benefits of this model are: - Services have well-defined explicit states. Services are not supposed to start any work until they are started, and they are supposed to enter Running state only if they have successfully done all initialization in Starting state. - States are observable by clients. Client can not only see the state, but also wait for Running or Terminated state. -- If more observability is needed, clients can register state listeners. +- If more observability is needed, clients can register state listeners. - Service startup and shutdown is done asynchronously. This allows for nice parallelization of startup or shutdown of multiple services. - Services that depend on each other can simply wait for other service to be in correct state before using it. @@ -31,17 +31,17 @@ Once service is in `Terminated` or `Failed` state, it cannot be restarted, these Full state diagram: ```text - ┌────────────────────────────────────────────────────────────────────┐ - │ │ - │ ▼ + ┌────────────────────────────────────────────────────────────────────┐ + │ │ + │ ▼ ┌─────┐ ┌──────────┐ ┌─────────┐ ┌──────────┐ ┌────────────┐ │ New │─────▶│ Starting │─────▶│ Running │────▶│ Stopping │───┬─▶│ Terminated │ └─────┘ └──────────┘ └─────────┘ └──────────┘ │ └────────────┘ - │ │ - │ │ - │ │ ┌────────┐ - └──────────────────────────────────────────┴──▶│ Failed │ - └────────┘ + │ │ + │ │ + │ │ ┌────────┐ + └──────────────────────────────────────────┴──▶│ Failed │ + └────────┘ ``` API and states and semantics are implemented to correspond to [Service class](https://guava.dev/releases/snapshot/api/docs/com/google/common/util/concurrent/Service.html) in Guava library. diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index abf69dbec51c..7af7876ed6ad 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -22,21 +22,23 @@ const ( // limits via flags, or per-user limits via yaml config. type Limits struct { // Distributor enforced limits. 
- IngestionRate float64 `yaml:"ingestion_rate"` - IngestionRateStrategy string `yaml:"ingestion_rate_strategy"` - IngestionBurstSize int `yaml:"ingestion_burst_size"` - AcceptHASamples bool `yaml:"accept_ha_samples"` - HAClusterLabel string `yaml:"ha_cluster_label"` - HAReplicaLabel string `yaml:"ha_replica_label"` - DropLabels flagext.StringSlice `yaml:"drop_labels"` - MaxLabelNameLength int `yaml:"max_label_name_length"` - MaxLabelValueLength int `yaml:"max_label_value_length"` - MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series"` - RejectOldSamples bool `yaml:"reject_old_samples"` - RejectOldSamplesMaxAge time.Duration `yaml:"reject_old_samples_max_age"` - CreationGracePeriod time.Duration `yaml:"creation_grace_period"` - EnforceMetricName bool `yaml:"enforce_metric_name"` - SubringSize int `yaml:"user_subring_size"` + IngestionRate float64 `yaml:"ingestion_rate"` + IngestionRateStrategy string `yaml:"ingestion_rate_strategy"` + IngestionBurstSize int `yaml:"ingestion_burst_size"` + AcceptHASamples bool `yaml:"accept_ha_samples"` + HAClusterLabel string `yaml:"ha_cluster_label"` + HAReplicaLabel string `yaml:"ha_replica_label"` + DropLabels flagext.StringSlice `yaml:"drop_labels"` + MaxLabelNameLength int `yaml:"max_label_name_length"` + MaxLabelValueLength int `yaml:"max_label_value_length"` + MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series"` + MaxMetadataLength int `yaml:"max_metadata_length"` + RejectOldSamples bool `yaml:"reject_old_samples"` + RejectOldSamplesMaxAge time.Duration `yaml:"reject_old_samples_max_age"` + CreationGracePeriod time.Duration `yaml:"creation_grace_period"` + EnforceMetadataMetricName bool `yaml:"enforce_metadata_metric_name"` + EnforceMetricName bool `yaml:"enforce_metric_name"` + SubringSize int `yaml:"user_subring_size"` // Ingester enforced limits. MaxSeriesPerQuery int `yaml:"max_series_per_query"` @@ -71,10 +73,12 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names") f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name") f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") + f.IntVar(&l.MaxMetadataLength, "validation.max-metadata-length", 1024, "Maximum length accepted for metric metadata. 
Metadata refers to Metric Name, HELP and UNIT.") f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", false, "Reject old samples.") f.DurationVar(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", 14*24*time.Hour, "Maximum accepted sample age before rejecting.") f.DurationVar(&l.CreationGracePeriod, "validation.create-grace-period", 10*time.Minute, "Duration which table will be created/deleted before/after it's needed; we won't accept sample from before this time.") f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.") + f.BoolVar(&l.EnforceMetadataMetricName, "validation.enforce-metadata-metric-name", true, "Enforce every metadata has a metric name.") f.IntVar(&l.MaxSeriesPerQuery, "ingester.max-series-per-query", 100000, "The maximum number of series that a query can return.") f.IntVar(&l.MaxSamplesPerQuery, "ingester.max-samples-per-query", 1000000, "The maximum number of samples that a query can return.") @@ -204,6 +208,12 @@ func (o *Overrides) MaxLabelNamesPerSeries(userID string) int { return o.getOverridesForUser(userID).MaxLabelNamesPerSeries } +// MaxMetadataLength returns maximum length metadata can be. Metadata refers +// to the Metric Name, HELP and UNIT. +func (o *Overrides) MaxMetadataLength(userID string) int { + return o.getOverridesForUser(userID).MaxMetadataLength +} + // RejectOldSamples returns true when we should reject samples older than certain // age. func (o *Overrides) RejectOldSamples(userID string) bool { @@ -272,6 +282,11 @@ func (o *Overrides) EnforceMetricName(userID string) bool { return o.getOverridesForUser(userID).EnforceMetricName } +// EnforceMetadataMetricName whether to enforce the presence of a metric name on metadata. +func (o *Overrides) EnforceMetadataMetricName(userID string) bool { + return o.getOverridesForUser(userID).EnforceMetadataMetricName +} + // CardinalityLimit returns the maximum number of timeseries allowed in a query. func (o *Overrides) CardinalityLimit(userID string) int { return o.getOverridesForUser(userID).CardinalityLimit diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 12476479e0a2..b3802d9c7dc1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -17,6 +17,17 @@ import ( const ( discardReasonLabel = "reason" + errMetadataMissingMetricName = "metadata missing metric name" + errMetadataTooLong = "metadata '%s' value too long: %.200q metric %.200q" + + typeMetricName = "METRIC_NAME" + typeHelp = "HELP" + typeUnit = "UNIT" + + metricNameTooLong = "metric_name_too_long" + helpTooLong = "help_too_long" + unitTooLong = "unit_too_long" + errMissingMetricName = "sample missing metric name" errInvalidMetricName = "sample invalid metric name: %.200q" errInvalidLabel = "sample invalid label: %.200q metric %.200q" @@ -55,9 +66,17 @@ var DiscardedSamples = prometheus.NewCounterVec( }, []string{discardReasonLabel, "user"}, ) +var DiscardedMetadata = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cortex_discarded_metadata_total", + Help: "The total number of metadata that were discarded.", + }, + []string{discardReasonLabel, "user"}, +) func init() { prometheus.MustRegister(DiscardedSamples) + prometheus.MustRegister(DiscardedMetadata) } // SampleValidationConfig helps with getting required config to validate sample. 
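For reference, a sketch of how the two new validation limits registered above could be set through the standard flag machinery; the flag-set name and the chosen values are illustrative, the flag names and defaults are the ones registered in the hunk above:

```go
package example

import (
	"flag"

	"github.com/cortexproject/cortex/pkg/util/validation"
)

func exampleLimits() validation.Limits {
	var limits validation.Limits
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	limits.RegisterFlags(fs)
	// Tighten the 1024-byte default and keep metric-name enforcement on.
	_ = fs.Parse([]string{
		"-validation.max-metadata-length=512",
		"-validation.enforce-metadata-metric-name=true",
	})
	return limits
}
```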
@@ -150,6 +169,45 @@ func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelA return nil } +// MetadataValidationConfig helps with getting required config to validate metadata. +type MetadataValidationConfig interface { + EnforceMetadataMetricName(userID string) bool + MaxMetadataLength(userID string) int +} + +// ValidateMetadata returns an err if a metric metadata is invalid. +func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *client.MetricMetadata) error { + if cfg.EnforceMetadataMetricName(userID) && metadata.MetricName == "" { + DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc() + return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName) + } + + maxMetadataValueLength := cfg.MaxMetadataLength(userID) + var reason string + var cause string + var metadataType string + if len(metadata.MetricName) > maxMetadataValueLength { + metadataType = typeMetricName + reason = metricNameTooLong + cause = metadata.MetricName + } else if len(metadata.Help) > maxMetadataValueLength { + metadataType = typeHelp + reason = helpTooLong + cause = metadata.Help + } else if len(metadata.Unit) > maxMetadataValueLength { + metadataType = typeUnit + reason = unitTooLong + cause = metadata.Unit + } + + if reason != "" { + DiscardedMetadata.WithLabelValues(reason, userID).Inc() + return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.MetricName) + } + + return nil +} + // this function formats label adapters as a metric name with labels, while preserving // label order, and keeping duplicates. If there are multiple "__name__" labels, only // first one is used as metric name, other ones will be included as regular labels. diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go index 4e3fb492e2eb..b9b95ce90889 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go @@ -140,7 +140,7 @@ func askServerForName(name string, qType dns.Type, client *dns.Client, servAddr if response.Truncated { if client.Net == "tcp" { - return nil, errors.Errorf("got truncated message on TCP (64kiB limit exceeded?)") + return nil, errors.New("got truncated message on TCP (64kiB limit exceeded?)") } // TCP fallback. diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index a3e073073093..0ef944574dd1 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -12,7 +12,9 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns" + "github.com/thanos-io/thanos/pkg/extprom" ) // Provider is a stateful cache for asynchronous DNS resolutions. It provides a way to resolve addresses and obtain them. 
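ValidateMetadata above rejects metadata without a metric name first (when enforced), then checks each field against the shared per-tenant length limit, incrementing DiscardedMetadata before returning an HTTP 400. A hedged usage sketch follows; tenantLimits is a hand-rolled stub standing in for the real per-tenant overrides, and the metadata literal is invented:

```go
package example

import (
	"log"

	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/cortexproject/cortex/pkg/util/validation"
)

// tenantLimits is a stub implementing MetadataValidationConfig.
type tenantLimits struct{}

func (tenantLimits) EnforceMetadataMetricName(userID string) bool { return true }
func (tenantLimits) MaxMetadataLength(userID string) int          { return 1024 }

func validateOne() {
	md := &client.MetricMetadata{MetricName: "http_requests_total", Help: "Total HTTP requests."}
	if err := validation.ValidateMetadata(tenantLimits{}, "tenant-1", md); err != nil {
		// err is an httpgrpc 400; DiscardedMetadata was already incremented.
		log.Println("rejecting metadata:", err)
	}
}
```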
@@ -23,7 +25,7 @@ type Provider struct { resolved map[string][]string logger log.Logger - resolverAddrs *prometheus.GaugeVec + resolverAddrs *extprom.TxGaugeVec resolverLookupsCount prometheus.Counter resolverFailuresCount prometheus.Counter } @@ -56,26 +58,20 @@ func NewProvider(logger log.Logger, reg prometheus.Registerer, resolverType Reso resolver: NewResolver(resolverType.ToResolver(logger)), resolved: make(map[string][]string), logger: logger, - resolverAddrs: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + resolverAddrs: extprom.NewTxGaugeVec(reg, prometheus.GaugeOpts{ Name: "dns_provider_results", Help: "The number of resolved endpoints for each configured address", }, []string{"addr"}), - resolverLookupsCount: prometheus.NewCounter(prometheus.CounterOpts{ + resolverLookupsCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "dns_lookups_total", Help: "The number of DNS lookups resolutions attempts", }), - resolverFailuresCount: prometheus.NewCounter(prometheus.CounterOpts{ + resolverFailuresCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "dns_failures_total", Help: "The number of DNS lookup failures", }), } - if reg != nil { - reg.MustRegister(p.resolverAddrs) - reg.MustRegister(p.resolverLookupsCount) - reg.MustRegister(p.resolverFailuresCount) - } - return p } @@ -91,6 +87,23 @@ func (p *Provider) Clone() *Provider { } } +// IsDynamicNode returns if the specified StoreAPI addr uses +// any kind of SD mechanism. +func IsDynamicNode(addr string) bool { + qtype, _ := GetQTypeName(addr) + return qtype != "" +} + +// GetQTypeName splits the provided addr into two parts: the QType (if any) +// and the name. +func GetQTypeName(addr string) (qtype string, name string) { + qtypeAndName := strings.SplitN(addr, "+", 2) + if len(qtypeAndName) != 2 { + return "", addr + } + return qtypeAndName[0], qtypeAndName[1] +} + // Resolve stores a list of provided addresses or their DNS records if requested. // Addresses prefixed with `dns+` or `dnssrv+` will be resolved through respective DNS lookup (A/AAAA or SRV). // defaultPort is used for non-SRV records when a port is not supplied. @@ -98,15 +111,18 @@ func (p *Provider) Resolve(ctx context.Context, addrs []string) { p.Lock() defer p.Unlock() + p.resolverAddrs.ResetTx() + defer p.resolverAddrs.Submit() + + resolvedAddrs := map[string][]string{} for _, addr := range addrs { var resolved []string - qtypeAndName := strings.SplitN(addr, "+", 2) - if len(qtypeAndName) != 2 { - // No lookup specified. Add to results and continue to the next address. - p.resolved[addr] = []string{addr} + qtype, name := GetQTypeName(addr) + if qtype == "" { + resolvedAddrs[name] = []string{name} + p.resolverAddrs.WithLabelValues(name).Set(1.0) continue } - qtype, name := qtypeAndName[0], qtypeAndName[1] resolved, err := p.resolver.Resolve(ctx, name, QType(qtype)) p.resolverLookupsCount.Inc() @@ -114,20 +130,13 @@ func (p *Provider) Resolve(ctx context.Context, addrs []string) { // The DNS resolution failed. Continue without modifying the old records. p.resolverFailuresCount.Inc() level.Error(p.logger).Log("msg", "dns resolution failed", "addr", addr, "err", err) - continue - } - p.resolved[addr] = resolved - } - - // Remove stored addresses that are no longer requested. 
- for existingAddr := range p.resolved { - if !contains(addrs, existingAddr) { - delete(p.resolved, existingAddr) - p.resolverAddrs.DeleteLabelValues(existingAddr) - } else { - p.resolverAddrs.WithLabelValues(existingAddr).Set(float64(len(p.resolved[existingAddr]))) + // Use cached values. + resolved = p.resolved[addr] } + resolvedAddrs[addr] = resolved + p.resolverAddrs.WithLabelValues(addr).Set(float64(len(resolved))) } + p.resolved = resolvedAddrs } // Addresses returns the latest addresses present in the Provider. @@ -141,12 +150,3 @@ func (p *Provider) Addresses() []string { } return result } - -func contains(slice []string, str string) bool { - for _, s := range slice { - if str == s { - return true - } - } - return false -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go new file mode 100644 index 000000000000..5da95f3b1726 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go @@ -0,0 +1,24 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package extprom + +import "github.com/prometheus/client_golang/prometheus" + +// WrapRegistererWithPrefix is like prometheus.WrapRegistererWithPrefix but it passes nil straight through +// which allows nil check. +func WrapRegistererWithPrefix(prefix string, reg prometheus.Registerer) prometheus.Registerer { + if reg == nil { + return nil + } + return prometheus.WrapRegistererWithPrefix(prefix, reg) +} + +// WrapRegistererWith is like prometheus.WrapRegistererWith but it passes nil straight through +// which allows nil check. +func WrapRegistererWith(labels prometheus.Labels, reg prometheus.Registerer) prometheus.Registerer { + if reg == nil { + return nil + } + return prometheus.WrapRegistererWith(labels, reg) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go new file mode 100644 index 000000000000..5db80b26e2a4 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/testing.go @@ -0,0 +1,35 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package extprom + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/testutil" +) + +// CurrentGaugeValuesFor returns gauge values for given metric names. Useful for testing based on registry, +// when you don't have access to metric variable. +func CurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 { + f, err := reg.Gather() + testutil.Ok(t, err) + + res := make(map[string]float64, len(metricNames)) + for _, g := range f { + for _, m := range metricNames { + if g.GetName() != m { + continue + } + + testutil.Equals(t, 1, len(g.GetMetric())) + if _, ok := res[m]; ok { + t.Error("expected only one metric family for", m) + t.FailNow() + } + res[m] = *g.GetMetric()[0].GetGauge().Value + } + } + return res +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go new file mode 100644 index 000000000000..a619b122e978 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go @@ -0,0 +1,98 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ +package extprom + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type TxGaugeVec struct { + current *prometheus.GaugeVec + mtx sync.Mutex + newMetricVal func() *prometheus.GaugeVec + + tx *prometheus.GaugeVec +} + +// NewTxGaugeVec is a prometheus.GaugeVec that allows starting an atomic metric value transaction. +// It might be useful when a long-running process wants to update a GaugeVec but wants to build/accumulate those metrics +// in a concurrent way without exposing partial state to Prometheus. +// Callers can also use this as a normal GaugeVec. +// +// Additionally, it allows initializing LabelValues on each transaction. +// NOTE: This is a quite naive implementation that creates a new prometheus.GaugeVec on each `ResetTx`; use wisely. +func NewTxGaugeVec(reg prometheus.Registerer, opts prometheus.GaugeOpts, labelNames []string, initLabelValues ...[]string) *TxGaugeVec { + // Nil as we will register it on our own later. + f := func() *prometheus.GaugeVec { + g := promauto.With(nil).NewGaugeVec(opts, labelNames) + for _, vals := range initLabelValues { + g.WithLabelValues(vals...) + } + return g + } + tx := &TxGaugeVec{ + current: f(), + newMetricVal: f, + } + if reg != nil { + reg.MustRegister(tx) + } + return tx +} + +// ResetTx starts a new transaction. Not goroutine-safe. +func (tx *TxGaugeVec) ResetTx() { + tx.tx = tx.newMetricVal() +} + +// Submit atomically and fully applies new values from the existing transaction GaugeVec. Not goroutine-safe. +func (tx *TxGaugeVec) Submit() { + if tx.tx == nil { + return + } + + tx.mtx.Lock() + tx.current = tx.tx + tx.mtx.Unlock() +} + +// Describe implements prometheus.Collector. +func (tx *TxGaugeVec) Describe(ch chan<- *prometheus.Desc) { + tx.mtx.Lock() + defer tx.mtx.Unlock() + + tx.current.Describe(ch) +} + +// Collect implements prometheus.Collector. +func (tx *TxGaugeVec) Collect(ch chan<- prometheus.Metric) { + tx.mtx.Lock() + defer tx.mtx.Unlock() + + tx.current.Collect(ch) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (tx *TxGaugeVec) With(labels prometheus.Labels) prometheus.Gauge { + if tx.tx == nil { + tx.ResetTx() + } + return tx.tx.With(labels) +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (tx *TxGaugeVec) WithLabelValues(lvs ...string) prometheus.Gauge { + if tx.tx == nil { + tx.ResetTx() + } + return tx.tx.WithLabelValues(lvs...) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go new file mode 100644 index 000000000000..5b3e85940bfe --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -0,0 +1,159 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package runutil provides helpers for advanced function scheduling control like repeat or retry. +// +// It's very often the case that you need to execute some code at fixed intervals or have it retried automatically. +// To do this reliably with proper timeouts, you need to carefully arrange some boilerplate for this. +// The functions below do it for you.
+//
+// For repeated execution, use Repeat:
+//
+// 	err := runutil.Repeat(10*time.Second, stopc, func() error {
+// 		// ...
+// 	})
+//
+// Retry starts executing the closure function f until no error is returned from f:
+//
+// 	err := runutil.Retry(10*time.Second, stopc, func() error {
+// 		// ...
+// 	})
+//
+// For logging an error on each f error, use RetryWithLog:
+//
+// 	err := runutil.RetryWithLog(logger, 10*time.Second, stopc, func() error {
+// 		// ...
+// 	})
+//
+// Another use case for the runutil package is when you want to close a `Closer` interface. As we all know, we should close all implementations of `Closer`, such as *os.File. Commonly we will use:
+//
+// 	defer closer.Close()
+//
+// The problem is that Close() can return an important error, e.g. for os.File the actual file flush might happen (and fail) in the `Close` method. It's important to *always* check the error. Thanos provides utility functions to log errors like those, allowing you to put them in a convenient `defer`:
+//
+// 	defer runutil.CloseWithLogOnErr(logger, closer, "log format message")
+//
+// For capturing the error, use CloseWithErrCapture:
+//
+// 	var err error
+// 	defer runutil.CloseWithErrCapture(&err, closer, "log format message")
+//
+// 	// ...
+//
+// If Close() returns an error, err will capture it and return it by argument.
+//
+// The runutil.Exhaust* family of functions provide the same functionality, but
+// they take an io.ReadCloser and exhaust the whole reader before closing
+// it. They are useful when trying to use http keep-alive connections, because
+// for the same connection to be re-used the whole response body needs to be
+// exhausted.
+package runutil
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	tsdberrors "github.com/prometheus/prometheus/tsdb/errors"
+)
+
+// Repeat executes f every interval until stopc is closed or f returns an error.
+// It executes f once right after being called.
+func Repeat(interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+
+	for {
+		if err := f(); err != nil {
+			return err
+		}
+		select {
+		case <-stopc:
+			return nil
+		case <-tick.C:
+		}
+	}
+}
+
+// Retry executes f every interval until stopc is closed or no error is returned from f.
+func Retry(interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	return RetryWithLog(log.NewNopLogger(), interval, stopc, f)
+}
+
+// RetryWithLog executes f every interval until stopc is closed or no error is returned from f. It logs an error on each f error.
+func RetryWithLog(logger log.Logger, interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+
+	var err error
+	for {
+		if err = f(); err == nil {
+			return nil
+		}
+		level.Error(logger).Log("msg", "function failed. Retrying in next tick", "err", err)
+		select {
+		case <-stopc:
+			return err
+		case <-tick.C:
+		}
+	}
+}
+
+// CloseWithLogOnErr makes sure we log every error, even those from best-effort tiny closers.
+func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, a ...interface{}) {
+	err := closer.Close()
+	if err == nil {
+		return
+	}
+
+	// Not a problem if it has been closed already.
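+	// errors.Is unwraps wrapped errors, so a wrapped os.ErrClosed
+	// (e.g. fmt.Errorf("...: %w", os.ErrClosed)) is matched as well.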
+	if errors.Is(err, os.ErrClosed) {
+		return
+	}
+
+	if logger == nil {
+		logger = log.NewLogfmtLogger(os.Stderr)
+	}
+
+	level.Warn(logger).Log("msg", "detected close error", "err", errors.Wrap(err, fmt.Sprintf(format, a...)))
+}
+
+// ExhaustCloseWithLogOnErr closes the io.ReadCloser with a log message on error, but exhausts the reader first.
+func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, a ...interface{}) {
+	_, err := io.Copy(ioutil.Discard, r)
+	if err != nil {
+		level.Warn(logger).Log("msg", "failed to exhaust reader, performance may be impeded", "err", err)
+	}
+
+	CloseWithLogOnErr(logger, r, format, a...)
+}
+
+// CloseWithErrCapture closes the closer and, on error, returns the error by argument, combined with the given error
+// (usually from the caller function).
+func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...interface{}) {
+	merr := tsdberrors.MultiError{}
+
+	merr.Add(*err)
+	merr.Add(errors.Wrapf(closer.Close(), format, a...))
+
+	*err = merr.Err()
+}
+
+// ExhaustCloseWithErrCapture closes the io.ReadCloser with error capture, but exhausts the reader first.
+func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a ...interface{}) {
+	_, copyErr := io.Copy(ioutil.Discard, r)
+
+	CloseWithErrCapture(err, r, format, a...)
+
+	// Prepend the io.Copy error.
+	merr := tsdberrors.MultiError{}
+	merr.Add(copyErr)
+	merr.Add(*err)
+
+	*err = merr.Err()
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go
new file mode 100644
index 000000000000..5f7dc15ea714
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/testorbench.go
@@ -0,0 +1,87 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package testutil
+
+import (
+	"testing"
+)
+
+// TB represents a union of test and benchmark.
+// This allows the same test suite to be run by both benchmark and test, helping to reuse more code.
+// The reason is that benchmarks are usually not run on CI, especially for short tests, so you would otherwise
+// need to recreate similar tests as `Test(t *testing.T)` methods. Example usage is presented here:
+//
+// 	func TestTestOrBench(t *testing.T) {
+// 		tb := NewTB(t)
+// 		tb.Run("1", func(tb TB) { testorbenchComplexTest(tb) })
+// 		tb.Run("2", func(tb TB) { testorbenchComplexTest(tb) })
+// 	}
+//
+// 	func BenchmarkTestOrBench(b *testing.B) {
+// 		tb := NewTB(b)
+// 		tb.Run("1", func(tb TB) { testorbenchComplexTest(tb) })
+// 		tb.Run("2", func(tb TB) { testorbenchComplexTest(tb) })
+// 	}
+type TB interface {
+	testing.TB
+	IsBenchmark() bool
+	Run(name string, f func(t TB)) bool
+
+	SetBytes(n int64)
+	N() int
+	ResetTimer()
+}
+
+// tb implements TB as well as testing.TB interfaces.
+type tb struct {
+	testing.TB
+}
+
+// NewTB creates tb from testing.TB.
+func NewTB(t testing.TB) TB { return &tb{TB: t} }
+
+// Run benchmarks/tests f as a subbenchmark/subtest with the given name. It reports
+// whether there were any failures.
+//
+// A subbenchmark/subtest is like any other benchmark/test.
+func (t *tb) Run(name string, f func(t TB)) bool {
+	if b, ok := t.TB.(*testing.B); ok {
+		return b.Run(name, func(nested *testing.B) { f(&tb{TB: nested}) })
+	}
+	if t, ok := t.TB.(*testing.T); ok {
+		return t.Run(name, func(nested *testing.T) { f(&tb{TB: nested}) })
+	}
+	panic("not a benchmark and not a test")
+}
+
+// N returns the number of iterations to do for a benchmark, 1 in case of a test.
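+// Shared suite code can therefore loop `for i := 0; i < t.N(); i++` and behave
+// correctly whether it was started from a test or from a benchmark.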
+func (t *tb) N() int {
+	if b, ok := t.TB.(*testing.B); ok {
+		return b.N
+	}
+	return 1
+}
+
+// SetBytes records the number of bytes processed in a single operation for a benchmark, noop otherwise.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (t *tb) SetBytes(n int64) {
+	if b, ok := t.TB.(*testing.B); ok {
+		b.SetBytes(n)
+	}
+}
+
+// ResetTimer resets the benchmark timer if it's a benchmark, noop otherwise.
+func (t *tb) ResetTimer() {
+	if b, ok := t.TB.(*testing.B); ok {
+		b.ResetTimer()
+	}
+}
+
+// IsBenchmark returns true if it's a benchmark.
+func (t *tb) IsBenchmark() bool {
+	_, ok := t.TB.(*testing.B)
+	return ok
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go b/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
new file mode 100644
index 000000000000..8b1e8e1d4f16
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/testutil/testutil.go
@@ -0,0 +1,155 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package testutil
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Assert fails the test if the condition is false.
+func Assert(tb testing.TB, condition bool, v ...interface{}) {
+	tb.Helper()
+	if condition {
+		return
+	}
+	_, file, line, _ := runtime.Caller(1)
+
+	var msg string
+	if len(v) > 0 {
+		msg = fmt.Sprintf(v[0].(string), v[1:]...)
+	}
+	tb.Fatalf("\033[31m%s:%d: "+msg+"\033[39m\n\n", filepath.Base(file), line)
+}
+
+// Ok fails the test if err is not nil.
+func Ok(tb testing.TB, err error, v ...interface{}) {
+	tb.Helper()
+	if err == nil {
+		return
+	}
+	_, file, line, _ := runtime.Caller(1)
+
+	var msg string
+	if len(v) > 0 {
+		msg = fmt.Sprintf(v[0].(string), v[1:]...)
+	}
+	tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
+}
+
+// NotOk fails the test if err is nil.
+func NotOk(tb testing.TB, err error, v ...interface{}) {
+	tb.Helper()
+	if err != nil {
+		return
+	}
+	_, file, line, _ := runtime.Caller(1)
+
+	var msg string
+	if len(v) > 0 {
+		msg = fmt.Sprintf(v[0].(string), v[1:]...)
+	}
+	tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line)
+}
+
+// Equals fails the test if exp is not equal to act.
+func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) {
+	tb.Helper()
+	if reflect.DeepEqual(exp, act) {
+		return
+	}
+	_, file, line, _ := runtime.Caller(1)
+
+	var msg string
+	if len(v) > 0 {
+		msg = fmt.Sprintf(v[0].(string), v[1:]...)
+	}
+	tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+	t := reflect.TypeOf(v)
+	k := t.Kind()
+
+	if k == reflect.Ptr {
+		t = t.Elem()
+		k = t.Kind()
+	}
+	return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
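+// The diff is rendered by go-difflib as a unified diff with one line of context.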
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + c := spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + } + if et != reflect.TypeOf("") { + e = c.Sdump(expected) + a = c.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + return "\n\nDiff:\n" + diff +} + +// GatherAndCompare compares the metrics of a Gatherers pair. +func GatherAndCompare(t *testing.T, g1 prometheus.Gatherer, g2 prometheus.Gatherer, filter string) { + g1m, err := g1.Gather() + Ok(t, err) + g2m, err := g2.Gather() + Ok(t, err) + + var m1 *dto.MetricFamily + for _, m := range g1m { + if *m.Name == filter { + m1 = m + } + } + var m2 *dto.MetricFamily + for _, m := range g2m { + if *m.Name == filter { + m2 = m + } + } + Equals(t, m1.String(), m2.String()) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index aad5152b6580..ce27da8a9406 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -128,7 +128,7 @@ github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog github.com/coreos/pkg/dlopen -# github.com/cortexproject/cortex v1.0.0 +# github.com/cortexproject/cortex v1.0.1-0.20200416152925-3fe04dcff1d8 github.com/cortexproject/cortex/pkg/chunk github.com/cortexproject/cortex/pkg/chunk/aws github.com/cortexproject/cortex/pkg/chunk/azure @@ -153,6 +153,7 @@ github.com/cortexproject/cortex/pkg/querier/lazyquery github.com/cortexproject/cortex/pkg/querier/queryrange github.com/cortexproject/cortex/pkg/querier/series github.com/cortexproject/cortex/pkg/ring +github.com/cortexproject/cortex/pkg/ring/client github.com/cortexproject/cortex/pkg/ring/kv github.com/cortexproject/cortex/pkg/ring/kv/codec github.com/cortexproject/cortex/pkg/ring/kv/consul @@ -519,9 +520,12 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.11.0 +# github.com/thanos-io/thanos v0.12.1-0.20200416112106-b391ca115ed8 github.com/thanos-io/thanos/pkg/discovery/dns github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns +github.com/thanos-io/thanos/pkg/extprom +github.com/thanos-io/thanos/pkg/runutil +github.com/thanos-io/thanos/pkg/testutil # github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 github.com/tmc/grpc-websocket-proxy/wsproxy # github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448
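
As a usage sketch for the newly vendored extprom.TxGaugeVec (the registry wiring and the metric/label names below are illustrative, not taken from this change): accumulate the next state inside a transaction, then swap it in atomically, so scrapes never observe partially updated values.

	package main

	import (
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
		"github.com/thanos-io/thanos/pkg/extprom"
	)

	func main() {
		reg := prometheus.NewRegistry()

		// NewTxGaugeVec registers itself on reg; passing a nil Registerer skips registration.
		g := extprom.NewTxGaugeVec(reg, prometheus.GaugeOpts{
			Name: "resolved_addresses", // illustrative name
			Help: "Number of resolved addresses per configured address.",
		}, []string{"addr"})

		// Build the next state without exposing partial values to scrapes.
		g.ResetTx()
		g.WithLabelValues("ingester-1").Set(3)
		g.WithLabelValues("ingester-2").Set(2)

		// Atomically replace the currently exported values.
		g.Submit()

		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		_ = http.ListenAndServe(":8080", nil)
	}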