diff --git a/CHANGELOG.md b/CHANGELOG.md index 9db8b06f0b0..f8c6e188a3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ * `-query-scheduler.grpc-client-config.grpc-compression=s2` * `-ruler.client.grpc-compression=s2` * `-ruler.query-frontend.grpc-client-config.grpc-compression=s2` +* [FEATURE] Query-frontend: added experimental configuration options `query-frontend.cache-errors` and `query-frontend.results-cache-ttl-for-errors` to allow non-transient error responses to be cached. When `query-frontend.cache-errors` is set to `true`, error responses caused by hitting limits or by bad data are cached for a short TTL. #9028 * [ENHANCEMENT] Compactor: Add `cortex_compactor_compaction_job_duration_seconds` and `cortex_compactor_compaction_job_blocks` histogram metrics to track duration of individual compaction jobs and number of blocks per job. #8371 * [ENHANCEMENT] Rules: Added per namespace max rules per rule group limit. The maximum number of rules per rule groups for all namespaces continues to be configured by `-ruler.max-rules-per-rule-group`, but now, this can be superseded by the new `-ruler.max-rules-per-rule-group-by-namespace` option on a per namespace basis. This new limit can be overridden using the overrides mechanism to be applied per-tenant. #8378 * [ENHANCEMENT] Rules: Added per namespace max rule groups per tenant limit. The maximum number of rule groups per rule tenant for all namespaces continues to be configured by `-ruler.max-rule-groups-per-tenant`, but now, this can be superseded by the new `-ruler.max-rule-groups-per-tenant-by-namespace` option on a per namespace basis. This new limit can be overridden using the overrides mechanism to be applied per-tenant. #8425 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 4f77e1e8c86..1eec7d464b4 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -4265,6 +4265,17 @@ "fieldFlag": "query-frontend.results-cache-ttl-for-labels-query", "fieldType": "duration" }, + { + "kind": "field", + "name": "results_cache_ttl_for_errors", + "required": false, + "desc": "Time to live duration for cached non-transient errors", + "fieldValue": null, + "fieldDefaultValue": 300000000000, + "fieldFlag": "query-frontend.results-cache-ttl-for-errors", + "fieldType": "duration", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "cache_unaligned_requests", @@ -6335,6 +6346,17 @@ "fieldFlag": "query-frontend.cache-results", "fieldType": "boolean" }, + { + "kind": "field", + "name": "cache_errors", + "required": false, + "desc": "Cache non-transient errors from queries.", + "fieldValue": null, + "fieldDefaultValue": false, + "fieldFlag": "query-frontend.cache-errors", + "fieldType": "boolean", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "max_retries", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index d33ac244065..f1f351438e3 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -2025,6 +2025,8 @@ Usage of ./cmd/mimir/mimir: [experimental] Timeout for writing active series responses. 0 means the value from `-server.http-write-timeout` is used. (default 5m0s) -query-frontend.align-queries-with-step Mutate incoming queries to align their start and end with their step to improve result caching. + -query-frontend.cache-errors + [experimental] Cache non-transient errors from queries. -query-frontend.cache-results Cache query results. 
-query-frontend.cache-unaligned-requests @@ -2123,6 +2125,8 @@ Usage of ./cmd/mimir/mimir: Time to live duration for cached query results. If query falls into out-of-order time window, -query-frontend.results-cache-ttl-for-out-of-order-time-window is used instead. (default 1w) -query-frontend.results-cache-ttl-for-cardinality-query duration Time to live duration for cached cardinality query results. The value 0 disables the cache. + -query-frontend.results-cache-ttl-for-errors duration + [experimental] Time to live duration for cached non-transient errors (default 5m) -query-frontend.results-cache-ttl-for-labels-query duration Time to live duration for cached label names and label values query results. The value 0 disables the cache. -query-frontend.results-cache-ttl-for-out-of-order-time-window duration diff --git a/docs/sources/mimir/configure/about-versioning.md b/docs/sources/mimir/configure/about-versioning.md index 7b81aafb3b6..6b8508c8167 100644 --- a/docs/sources/mimir/configure/about-versioning.md +++ b/docs/sources/mimir/configure/about-versioning.md @@ -161,6 +161,7 @@ The following features are currently experimental: - Query blocking on a per-tenant basis (configured with the limit `blocked_queries`) - Sharding of active series queries (`-query-frontend.shard-active-series-queries`) - Server-side write timeout for responses to active series requests (`-query-frontend.active-series-write-timeout`) + - Caching of non-transient error responses (`-query-frontend.cache-errors`, `-query-frontend.results-cache-ttl-for-errors`) - Query-scheduler - `-query-scheduler.querier-forget-delay` - Store-gateway diff --git a/docs/sources/mimir/configure/configuration-parameters/index.md b/docs/sources/mimir/configure/configuration-parameters/index.md index 3ae87a15d31..83e493ef15e 100644 --- a/docs/sources/mimir/configure/configuration-parameters/index.md +++ b/docs/sources/mimir/configure/configuration-parameters/index.md @@ -1658,6 +1658,10 @@ results_cache: # CLI flag: -query-frontend.cache-results [cache_results: | default = false] +# (experimental) Cache non-transient errors from queries. +# CLI flag: -query-frontend.cache-errors +[cache_errors: | default = false] + # (advanced) Maximum number of retries for a single request; beyond this, the # downstream error is returned. # CLI flag: -query-frontend.max-retries-per-request @@ -3465,6 +3469,10 @@ The `limits` block configures default and per-tenant limits imposed by component # CLI flag: -query-frontend.results-cache-ttl-for-labels-query [results_cache_ttl_for_labels_query: | default = 0s] +# (experimental) Time to live duration for cached non-transient errors +# CLI flag: -query-frontend.results-cache-ttl-for-errors +[results_cache_ttl_for_errors: | default = 5m] + # (advanced) Cache requests that are not step-aligned. 
# CLI flag: -query-frontend.cache-unaligned-requests [cache_unaligned_requests: | default = false] diff --git a/pkg/frontend/querymiddleware/error_caching.go b/pkg/frontend/querymiddleware/error_caching.go new file mode 100644 index 00000000000..6415e370e45 --- /dev/null +++ b/pkg/frontend/querymiddleware/error_caching.go @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querymiddleware + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/gogo/protobuf/proto" + "github.com/grafana/dskit/cache" + "github.com/grafana/dskit/tenant" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + apierror "github.com/grafana/mimir/pkg/api/error" + "github.com/grafana/mimir/pkg/util/spanlogger" + "github.com/grafana/mimir/pkg/util/validation" +) + +const ( + reasonNotAPIError = "not-api-error" + reasonNotCacheableError = "not-cacheable-api-error" +) + +func newErrorCachingMiddleware(cache cache.Cache, limits Limits, shouldCacheReq shouldCacheFn, logger log.Logger, reg prometheus.Registerer) MetricsQueryMiddleware { + cacheLoadAttempted := promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_frontend_query_error_cache_requests_total", + Help: "Number of requests that check the results cache for an error.", + }) + cacheLoadHits := promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_frontend_query_error_cache_hits_total", + Help: "Number of hits for the errors in the results cache.", + }) + cacheStoreAttempted := promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_frontend_query_error_cache_store_requests_total", + Help: "Number of requests that resulted in an error.", + }) + cacheStoreSkipped := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_frontend_query_error_cache_store_skipped_total", + Help: "Number of requests that resulted in an error that was not stored in the results cache.", + }, []string{"reason"}) + + return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler { + return &errorCachingHandler{ + next: next, + cache: cache, + limits: limits, + shouldCacheReq: shouldCacheReq, + logger: logger, + cacheLoadAttempted: cacheLoadAttempted, + cacheLoadHits: cacheLoadHits, + cacheStoreAttempted: cacheStoreAttempted, + cacheStoreSkipped: cacheStoreSkipped, + } + }) +} + +type errorCachingHandler struct { + next MetricsQueryHandler + cache cache.Cache + limits Limits + shouldCacheReq shouldCacheFn + logger log.Logger + + cacheLoadAttempted prometheus.Counter + cacheLoadHits prometheus.Counter + cacheStoreAttempted prometheus.Counter + cacheStoreSkipped *prometheus.CounterVec +} + +func (e *errorCachingHandler) Do(ctx context.Context, request MetricsQueryRequest) (Response, error) { + spanLog := spanlogger.FromContext(ctx, e.logger) + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return e.next.Do(ctx, request) + } + + // Check if caching has been disabled via an option on the request + if !e.shouldCacheReq(request) { + return e.next.Do(ctx, request) + } + + e.cacheLoadAttempted.Inc() + key := errorCachingKey(tenant.JoinTenantIDs(tenantIDs), request) + hashedKey := cacheHashKey(key) + + if cachedErr := e.loadErrorFromCache(ctx, key, hashedKey, spanLog); cachedErr != nil { + e.cacheLoadHits.Inc() + spanLog.DebugLog( + "msg", "returned cached API error", + "error_type", cachedErr.Type, + "key", key, + "hashed_key", hashedKey, + ) + + return nil, 
cachedErr + } + + res, err := e.next.Do(ctx, request) + if err != nil { + e.cacheStoreAttempted.Inc() + + var apiErr *apierror.APIError + if !errors.As(err, &apiErr) { + e.cacheStoreSkipped.WithLabelValues(reasonNotAPIError).Inc() + return res, err + } + + if cacheable, reason := e.isCacheable(apiErr); !cacheable { + e.cacheStoreSkipped.WithLabelValues(reason).Inc() + spanLog.DebugLog( + "msg", "error result from request is not cacheable", + "error_type", apiErr.Type, + "reason", reason, + ) + return res, err + } + + ttl := validation.MinDurationPerTenant(tenantIDs, e.limits.ResultsCacheTTLForErrors) + e.storeErrorToCache(key, hashedKey, ttl, apiErr, spanLog) + } + + return res, err +} + +func (e *errorCachingHandler) loadErrorFromCache(ctx context.Context, key, hashedKey string, spanLog *spanlogger.SpanLogger) *apierror.APIError { + res := e.cache.GetMulti(ctx, []string{hashedKey}) + cached, ok := res[hashedKey] + if !ok { + return nil + } + + var cachedError CachedError + if err := proto.Unmarshal(cached, &cachedError); err != nil { + level.Warn(spanLog).Log("msg", "unable to unmarshal cached error", "err", err) + return nil + } + + if cachedError.GetKey() != key { + spanLog.DebugLog( + "msg", "cached error key does not match", + "expected_key", key, + "actual_key", cachedError.GetKey(), + "hashed_key", hashedKey, + ) + return nil + } + + return apierror.New(apierror.Type(cachedError.ErrorType), cachedError.ErrorMessage) + +} + +func (e *errorCachingHandler) storeErrorToCache(key, hashedKey string, ttl time.Duration, apiErr *apierror.APIError, spanLog *spanlogger.SpanLogger) { + bytes, err := proto.Marshal(&CachedError{ + Key: key, + ErrorType: string(apiErr.Type), + ErrorMessage: apiErr.Message, + }) + + if err != nil { + level.Warn(spanLog).Log("msg", "unable to marshal cached error", "err", err) + return + } + + e.cache.SetAsync(hashedKey, bytes, ttl) +} + +func (e *errorCachingHandler) isCacheable(apiErr *apierror.APIError) (bool, string) { + if apiErr.Type != apierror.TypeBadData && apiErr.Type != apierror.TypeExec && apiErr.Type != apierror.TypeTooLargeEntry { + return false, reasonNotCacheableError + } + + return true, "" +} + +// errorCachingKey returns the key for caching an error query response. Standalone function +// to allow for easier testing. 
+func errorCachingKey(tenantID string, r MetricsQueryRequest) string { + return fmt.Sprintf("EC:%s:%s:%d:%d:%d", tenantID, r.GetQuery(), r.GetStart(), r.GetEnd(), r.GetStep()) +} diff --git a/pkg/frontend/querymiddleware/error_caching_test.go b/pkg/frontend/querymiddleware/error_caching_test.go new file mode 100644 index 00000000000..46fae948f78 --- /dev/null +++ b/pkg/frontend/querymiddleware/error_caching_test.go @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querymiddleware + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/grafana/dskit/cache" + "github.com/grafana/dskit/user" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + apierror "github.com/grafana/mimir/pkg/api/error" + "github.com/grafana/mimir/pkg/util/test" +) + +func TestErrorCachingHandler_Do(t *testing.T) { + newDefaultRequest := func() *PrometheusRangeQueryRequest { + return &PrometheusRangeQueryRequest{ + queryExpr: parseQuery(t, "up"), + start: 100, + end: 200, + step: 10, + } + } + + runHandler := func(ctx context.Context, inner MetricsQueryHandler, c cache.Cache, req MetricsQueryRequest) (Response, error) { + limits := &mockLimits{resultsCacheTTLForErrors: time.Minute} + middleware := newErrorCachingMiddleware(c, limits, resultsCacheEnabledByOption, test.NewTestingLogger(t), prometheus.NewPedanticRegistry()) + handler := middleware.Wrap(inner) + return handler.Do(ctx, req) + } + + t.Run("no user set", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerRes := newEmptyPrometheusResponse() + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) + + ctx := context.Background() + req := newDefaultRequest() + res, err := runHandler(ctx, inner, c, req) + + require.NoError(t, err) + require.Equal(t, innerRes, res) + require.Equal(t, 0, c.CountFetchCalls()) + require.Equal(t, 0, c.CountStoreCalls()) + }) + + t.Run("disabled by option", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerRes := newEmptyPrometheusResponse() + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + req.options = Options{ + CacheDisabled: true, + } + res, err := runHandler(ctx, inner, c, req) + + require.NoError(t, err) + require.Equal(t, innerRes, res) + require.Equal(t, 0, c.CountFetchCalls()) + require.Equal(t, 0, c.CountStoreCalls()) + }) + + t.Run("cache hit", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + inner := &mockHandler{} + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + key := errorCachingKey("1234", req) + bytes, err := proto.Marshal(&CachedError{ + Key: key, + ErrorType: string(apierror.TypeExec), + ErrorMessage: "limits error", + }) + require.NoError(t, err) + + // NOTE: We rely on this mock cache being synchronous + c.SetAsync(cacheHashKey(key), bytes, time.Minute) + res, err := runHandler(ctx, inner, c, req) + + require.Error(t, err) + require.Nil(t, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 1, c.CountStoreCalls()) + }) + + t.Run("cache hit key collision", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerRes := newEmptyPrometheusResponse() + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) + + ctx := 
user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + + key := errorCachingKey("1234", req) + bytes, err := proto.Marshal(&CachedError{ + Key: "different key that is stored under the same hashed key", + ErrorType: string(apierror.TypeExec), + ErrorMessage: "limits error", + }) + require.NoError(t, err) + + // NOTE: We rely on this mock cache being synchronous + c.SetAsync(cacheHashKey(key), bytes, time.Minute) + res, err := runHandler(ctx, inner, c, req) + + require.NoError(t, err) + require.Equal(t, innerRes, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 1, c.CountStoreCalls()) + }) + + t.Run("corrupt cache data", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerRes := newEmptyPrometheusResponse() + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + + key := errorCachingKey("1234", req) + bytes := []byte{0x0, 0x0, 0x0, 0x0} + + // NOTE: We rely on this mock cache being synchronous + c.SetAsync(cacheHashKey(key), bytes, time.Minute) + res, err := runHandler(ctx, inner, c, req) + + require.NoError(t, err) + require.Equal(t, innerRes, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 1, c.CountStoreCalls()) + }) + + t.Run("cache miss no error", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerRes := newEmptyPrometheusResponse() + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return(innerRes, nil) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + res, err := runHandler(ctx, inner, c, req) + + require.NoError(t, err) + require.Equal(t, innerRes, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 0, c.CountStoreCalls()) + }) + + t.Run("non-api error", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerErr := errors.New("something unexpected happened") + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return((*PrometheusResponse)(nil), innerErr) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + res, err := runHandler(ctx, inner, c, req) + + require.Error(t, err) + require.Nil(t, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 0, c.CountStoreCalls()) + }) + + t.Run("api error not cachable", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerErr := apierror.New(apierror.TypeUnavailable, "service unavailable") + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return((*PrometheusResponse)(nil), innerErr) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + res, err := runHandler(ctx, inner, c, req) + + require.Error(t, err) + require.Nil(t, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 0, c.CountStoreCalls()) + }) + + t.Run("api error cachable", func(t *testing.T) { + c := cache.NewInstrumentedMockCache() + + innerErr := apierror.New(apierror.TypeTooLargeEntry, "response is too big") + inner := &mockHandler{} + inner.On("Do", mock.Anything, mock.Anything).Return((*PrometheusResponse)(nil), innerErr) + + ctx := user.InjectOrgID(context.Background(), "1234") + req := newDefaultRequest() + res, err := runHandler(ctx, inner, c, req) + + require.Error(t, err) + require.Nil(t, res) + require.Equal(t, 1, c.CountFetchCalls()) + require.Equal(t, 1, c.CountStoreCalls()) + }) +} diff --git 
a/pkg/frontend/querymiddleware/limits.go b/pkg/frontend/querymiddleware/limits.go index 80d3e1bf05b..74e1f3a7848 100644 --- a/pkg/frontend/querymiddleware/limits.go +++ b/pkg/frontend/querymiddleware/limits.go @@ -90,6 +90,9 @@ type Limits interface { // ResultsCacheTTLForLabelsQuery returns TTL for cached results for label names and values queries. ResultsCacheTTLForLabelsQuery(userID string) time.Duration + // ResultsCacheTTLForErrors returns TTL for cached non-transient errors. + ResultsCacheTTLForErrors(userID string) time.Duration + // ResultsCacheForUnalignedQueryEnabled returns whether to cache results for queries that are not step-aligned ResultsCacheForUnalignedQueryEnabled(userID string) bool diff --git a/pkg/frontend/querymiddleware/limits_test.go b/pkg/frontend/querymiddleware/limits_test.go index fe4e80cc2c6..c12ccf43760 100644 --- a/pkg/frontend/querymiddleware/limits_test.go +++ b/pkg/frontend/querymiddleware/limits_test.go @@ -561,6 +561,10 @@ func (m multiTenantMockLimits) ResultsCacheTTLForLabelsQuery(userID string) time return m.byTenant[userID].resultsCacheTTLForLabelsQuery } +func (m multiTenantMockLimits) ResultsCacheTTLForErrors(userID string) time.Duration { + return m.byTenant[userID].resultsCacheTTLForErrors +} + func (m multiTenantMockLimits) ResultsCacheForUnalignedQueryEnabled(userID string) bool { return m.byTenant[userID].resultsCacheForUnalignedQueryEnabled } @@ -609,6 +613,7 @@ type mockLimits struct { resultsCacheOutOfOrderWindowTTL time.Duration resultsCacheTTLForCardinalityQuery time.Duration resultsCacheTTLForLabelsQuery time.Duration + resultsCacheTTLForErrors time.Duration resultsCacheForUnalignedQueryEnabled bool blockedQueries []*validation.BlockedQuery alignQueriesWithStep bool @@ -678,6 +683,10 @@ func (m mockLimits) ResultsCacheTTLForOutOfOrderTimeWindow(string) time.Duration return m.resultsCacheOutOfOrderWindowTTL } +func (m mockLimits) ResultsCacheTTLForErrors(string) time.Duration { + return m.resultsCacheTTLForErrors +} + func (m mockLimits) ResultsCacheTTLForCardinalityQuery(string) time.Duration { return m.resultsCacheTTLForCardinalityQuery } diff --git a/pkg/frontend/querymiddleware/model.pb.go b/pkg/frontend/querymiddleware/model.pb.go index 51f4239f768..47f80838c30 100644 --- a/pkg/frontend/querymiddleware/model.pb.go +++ b/pkg/frontend/querymiddleware/model.pb.go @@ -276,6 +276,65 @@ func (m *SampleStream) GetHistograms() []mimirpb.FloatHistogramPair { return nil } +type CachedError struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` + ErrorType string `protobuf:"bytes,2,opt,name=errorType,proto3" json:"errorType"` + ErrorMessage string `protobuf:"bytes,3,opt,name=errorMessage,proto3" json:"errorMessage"` +} + +func (m *CachedError) Reset() { *m = CachedError{} } +func (*CachedError) ProtoMessage() {} +func (*CachedError) Descriptor() ([]byte, []int) { + return fileDescriptor_4c16552f9fdb66d8, []int{4} +} +func (m *CachedError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CachedError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CachedError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CachedError) XXX_Merge(src proto.Message) { + xxx_messageInfo_CachedError.Merge(m, src) +} +func (m *CachedError) XXX_Size() int { + return m.Size() +} +func (m *CachedError) XXX_DiscardUnknown() { + 
xxx_messageInfo_CachedError.DiscardUnknown(m) +} + +var xxx_messageInfo_CachedError proto.InternalMessageInfo + +func (m *CachedError) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *CachedError) GetErrorType() string { + if m != nil { + return m.ErrorType + } + return "" +} + +func (m *CachedError) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + type CachedResponse struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"` // List of cached responses; non-overlapping and in order. @@ -285,7 +344,7 @@ type CachedResponse struct { func (m *CachedResponse) Reset() { *m = CachedResponse{} } func (*CachedResponse) ProtoMessage() {} func (*CachedResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{4} + return fileDescriptor_4c16552f9fdb66d8, []int{5} } func (m *CachedResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -342,7 +401,7 @@ type Extent struct { func (m *Extent) Reset() { *m = Extent{} } func (*Extent) ProtoMessage() {} func (*Extent) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{5} + return fileDescriptor_4c16552f9fdb66d8, []int{6} } func (m *Extent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -418,7 +477,7 @@ type Options struct { func (m *Options) Reset() { *m = Options{} } func (*Options) ProtoMessage() {} func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{6} + return fileDescriptor_4c16552f9fdb66d8, []int{7} } func (m *Options) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -489,7 +548,7 @@ type QueryStatistics struct { func (m *QueryStatistics) Reset() { *m = QueryStatistics{} } func (*QueryStatistics) ProtoMessage() {} func (*QueryStatistics) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{7} + return fileDescriptor_4c16552f9fdb66d8, []int{8} } func (m *QueryStatistics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -541,7 +600,7 @@ type CachedHTTPResponse struct { func (m *CachedHTTPResponse) Reset() { *m = CachedHTTPResponse{} } func (*CachedHTTPResponse) ProtoMessage() {} func (*CachedHTTPResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{8} + return fileDescriptor_4c16552f9fdb66d8, []int{9} } func (m *CachedHTTPResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -607,7 +666,7 @@ type CachedHTTPHeader struct { func (m *CachedHTTPHeader) Reset() { *m = CachedHTTPHeader{} } func (*CachedHTTPHeader) ProtoMessage() {} func (*CachedHTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_4c16552f9fdb66d8, []int{9} + return fileDescriptor_4c16552f9fdb66d8, []int{10} } func (m *CachedHTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -655,6 +714,7 @@ func init() { proto.RegisterType((*PrometheusResponse)(nil), "queryrange.PrometheusResponse") proto.RegisterType((*PrometheusData)(nil), "queryrange.PrometheusData") proto.RegisterType((*SampleStream)(nil), "queryrange.SampleStream") + proto.RegisterType((*CachedError)(nil), "queryrange.CachedError") proto.RegisterType((*CachedResponse)(nil), "queryrange.CachedResponse") proto.RegisterType((*Extent)(nil), "queryrange.Extent") proto.RegisterType((*Options)(nil), "queryrange.Options") @@ -666,71 +726,74 @@ func init() { func init() { proto.RegisterFile("model.proto", fileDescriptor_4c16552f9fdb66d8) } var fileDescriptor_4c16552f9fdb66d8 = []byte{ - // 1015 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xcf, 0x6e, 0x23, 0xc5, - 0x13, 0xf6, 0xc4, 0x7f, 0x53, 0xde, 0x5f, 0x62, 0x75, 0xa2, 0x1f, 0x4e, 0x80, 0x19, 0x6b, 0xc4, - 0x21, 0xa0, 0x5d, 0x07, 0x82, 0xe0, 0x80, 0x00, 0xb1, 0xce, 0x06, 0x25, 0xb0, 0x40, 0x68, 0x47, - 0x20, 0x71, 0x89, 0xda, 0x9e, 0x8e, 0x3d, 0xec, 0xcc, 0xf4, 0xd0, 0xdd, 0xde, 0x5d, 0xdf, 0x10, - 0x0f, 0x80, 0x78, 0x02, 0xce, 0x3c, 0xca, 0x1e, 0x73, 0x5c, 0xed, 0x61, 0x44, 0x1c, 0x21, 0x21, - 0x9f, 0xf6, 0x01, 0x38, 0xa0, 0xa9, 0x9e, 0x19, 0xcf, 0xee, 0xe6, 0xc0, 0xc5, 0xd3, 0xfd, 0xd5, - 0xf7, 0x55, 0x55, 0x57, 0x57, 0x97, 0xa1, 0x1d, 0x0a, 0x8f, 0x07, 0xfd, 0x58, 0x0a, 0x2d, 0x08, - 0xfc, 0x34, 0xe3, 0x72, 0x2e, 0x59, 0x34, 0xe1, 0xbb, 0x77, 0x26, 0xbe, 0x9e, 0xce, 0x46, 0xfd, - 0xb1, 0x08, 0xf7, 0x27, 0x62, 0x22, 0xf6, 0x91, 0x32, 0x9a, 0x5d, 0xe0, 0x0e, 0x37, 0xb8, 0x32, - 0xd2, 0xdd, 0x77, 0xcb, 0x74, 0xc9, 0x2e, 0x58, 0xc4, 0xf6, 0x43, 0x3f, 0xf4, 0xe5, 0x7e, 0xfc, - 0x60, 0x62, 0x56, 0xf1, 0xc8, 0x7c, 0x33, 0xc5, 0xce, 0x44, 0x88, 0x49, 0xc0, 0x57, 0x7e, 0x59, - 0x34, 0x37, 0x26, 0xf7, 0x3e, 0x74, 0x4e, 0xa5, 0x08, 0xb9, 0x9e, 0xf2, 0x99, 0x3a, 0xe6, 0xcc, - 0xe3, 0x92, 0xec, 0x40, 0xed, 0x6b, 0x16, 0xf2, 0xae, 0xd5, 0xb3, 0xf6, 0xd6, 0x07, 0xf5, 0x65, - 0xe2, 0x58, 0x77, 0x28, 0x42, 0xe4, 0x4d, 0x68, 0x7c, 0xc7, 0x82, 0x19, 0x57, 0xdd, 0xb5, 0x5e, - 0x75, 0x65, 0xcc, 0x40, 0xf7, 0x9f, 0x35, 0x20, 0x2b, 0x77, 0x94, 0xab, 0x58, 0x44, 0x8a, 0x13, - 0x17, 0x1a, 0x43, 0xcd, 0xf4, 0x4c, 0x65, 0x2e, 0x61, 0x99, 0x38, 0x0d, 0x85, 0x08, 0xcd, 0x2c, - 0x64, 0x00, 0xb5, 0x7b, 0x4c, 0xb3, 0xee, 0x5a, 0xcf, 0xda, 0x6b, 0x1f, 0xec, 0xf6, 0x57, 0xf5, - 0xe9, 0xaf, 0x3c, 0xa6, 0x8c, 0x01, 0x59, 0x26, 0xce, 0x86, 0xc7, 0x34, 0xbb, 0x2d, 0x42, 0x5f, - 0xf3, 0x30, 0xd6, 0x73, 0x8a, 0x5a, 0xf2, 0x01, 0xac, 0x1f, 0x49, 0x29, 0xe4, 0xd9, 0x3c, 0xe6, - 0xdd, 0x2a, 0x86, 0x7a, 0x6d, 0x99, 0x38, 0x5b, 0x3c, 0x07, 0x4b, 0x8a, 0x15, 0x93, 0xbc, 0x0d, - 0x75, 0xdc, 0x74, 0x6b, 0x28, 0xd9, 0x5a, 0x26, 0xce, 0x26, 0x4a, 0x4a, 0x74, 0xc3, 0x20, 0x9f, - 0x40, 0xd3, 0x14, 0x49, 0x75, 0xeb, 0xbd, 0xea, 0x5e, 0xfb, 0xe0, 0x8d, 0x9b, 0x13, 0x35, 0xa4, - 0xbc, 0x3c, 0xb9, 0x86, 0x1c, 0x40, 0xeb, 0x7b, 0x26, 0x23, 0x3f, 0x9a, 0xa8, 0x6e, 0x03, 0x0b, - 0xf8, 0xff, 0x65, 0xe2, 0x90, 0x47, 0x19, 0x56, 0x8a, 0x57, 0xf0, 0xd2, 0xec, 0x4e, 0xa2, 0x0b, - 0xa1, 0xba, 0x4d, 0x14, 0x60, 0x76, 0x7e, 0x0a, 0x94, 0xb3, 0x43, 0x86, 0xfb, 0x8b, 0x05, 0x1b, - 0x2f, 0x16, 0x8b, 0xf4, 0x01, 0x28, 0x57, 0xb3, 0x40, 0x63, 0x4d, 0x4c, 0xf9, 0x37, 0x96, 0x89, - 0x03, 0xb2, 0x40, 0x69, 0x89, 0x41, 0x3e, 0x83, 0x86, 0xd9, 0xe1, 0x05, 0xb7, 0x0f, 0xba, 0xe5, - 0xf3, 0x0d, 0x59, 0x18, 0x07, 0x7c, 0xa8, 0x25, 0x67, 0xe1, 0x60, 0xe3, 0x49, 0xe2, 0x54, 0xd2, - 0x8b, 0x34, 0x9e, 0x68, 0xa6, 0x73, 0x7f, 0x5d, 0x83, 0x5b, 0x65, 0x22, 0x89, 0xa1, 0x11, 0xb0, - 0x11, 0x0f, 0xd2, 0xdb, 0x4f, 0x5d, 0x6e, 0xf5, 0xc7, 0x42, 0x6a, 0xfe, 0x38, 0x1e, 0xf5, 0xef, - 0xa7, 0xf8, 0x29, 0xf3, 0xe5, 0xe0, 0x30, 0xf5, 0xf6, 0x2c, 0x71, 0xde, 0xfb, 0x2f, 0xcd, 0x6d, - 0x74, 0x77, 0x3d, 0x16, 0x6b, 0x2e, 0xd3, 0x14, 0x42, 0xae, 0xa5, 0x3f, 0xa6, 0x59, 0x1c, 0xf2, - 0x11, 0x34, 0x15, 0x66, 0xa0, 0xb2, 0x53, 0x74, 0x56, 0x21, 0x4d, 0x6a, 0xab, 0xec, 0x1f, 0x62, - 0xe7, 0xd2, 0x5c, 0x40, 0x4e, 0x01, 0xa6, 0xbe, 0xd2, 0x62, 0x22, 0x59, 0xa8, 0xba, 0xd5, 0xec, - 0x92, 0x0b, 0xf9, 0xe7, 0x81, 0x60, 0xfa, 0x38, 0x27, 0x60, 0xea, 0x24, 0x73, 0x55, 0xd2, 0xd1, - 0xd2, 0xda, 0xfd, 0x11, 0x36, 0x0e, 0xd9, 0x78, 0xca, 0xbd, 0xe2, 0x3d, 0xec, 0x40, 0xf5, 0x01, - 
0x9f, 0x67, 0xb7, 0xd1, 0x5c, 0x26, 0x4e, 0xba, 0xa5, 0xe9, 0x4f, 0xda, 0x60, 0xfc, 0xb1, 0xe6, - 0x91, 0xce, 0x53, 0x27, 0xe5, 0x0b, 0x38, 0x42, 0xd3, 0x60, 0x33, 0x8b, 0x98, 0x53, 0x69, 0xbe, - 0x70, 0x9f, 0x59, 0xd0, 0x30, 0x24, 0xe2, 0x40, 0x5d, 0x69, 0x26, 0x35, 0x86, 0xa9, 0x0e, 0xd6, - 0x97, 0x89, 0x63, 0x00, 0x6a, 0x3e, 0x69, 0x16, 0x3c, 0xf2, 0xf0, 0xc1, 0x55, 0x4d, 0x16, 0x3c, - 0xf2, 0x68, 0xfa, 0x43, 0x7a, 0xd0, 0xd2, 0x92, 0x8d, 0xf9, 0xb9, 0xef, 0x65, 0x8f, 0x22, 0xef, - 0x64, 0x84, 0x4f, 0x3c, 0xf2, 0x29, 0xb4, 0x64, 0x76, 0x9c, 0x6e, 0x1d, 0x9f, 0xec, 0x76, 0xdf, - 0x4c, 0x99, 0x7e, 0x3e, 0x65, 0xfa, 0x77, 0xa3, 0xf9, 0xe0, 0xd6, 0x32, 0x71, 0x0a, 0x26, 0x2d, - 0x56, 0xe4, 0x36, 0x10, 0x3c, 0xd7, 0xb9, 0xf6, 0x43, 0xae, 0x34, 0x0b, 0xe3, 0xf3, 0x30, 0x7d, - 0x13, 0xd6, 0x5e, 0x95, 0x76, 0xd0, 0x72, 0x96, 0x1b, 0xbe, 0x52, 0x5f, 0xd4, 0x5a, 0xd5, 0x4e, - 0xcd, 0xfd, 0xcb, 0x82, 0xe6, 0x37, 0xb1, 0xf6, 0x45, 0xa4, 0xc8, 0x5b, 0xf0, 0x3f, 0x2c, 0xea, - 0x3d, 0x5f, 0xb1, 0x51, 0xc0, 0x3d, 0x3c, 0x65, 0x8b, 0xbe, 0x08, 0x92, 0x77, 0xa0, 0x33, 0x9c, - 0x32, 0xe9, 0xf9, 0xd1, 0xa4, 0x20, 0xae, 0x21, 0xf1, 0x15, 0x9c, 0xf4, 0xa0, 0x7d, 0x26, 0x34, - 0x0b, 0xd0, 0xa0, 0x70, 0x7c, 0xd4, 0x69, 0x19, 0x22, 0x07, 0xb0, 0x7d, 0x12, 0x29, 0xcd, 0x22, - 0x3d, 0x8c, 0x03, 0x5f, 0x17, 0x1e, 0x6b, 0xe8, 0xf1, 0x46, 0xdb, 0xcb, 0x9a, 0x93, 0x48, 0x73, - 0xf9, 0x90, 0x05, 0x58, 0xb3, 0x2a, 0xbd, 0xd1, 0xe6, 0x1e, 0xc1, 0xe6, 0xb7, 0x69, 0x05, 0xd2, - 0xc9, 0xe8, 0x2b, 0xed, 0x8f, 0x31, 0xf4, 0x91, 0xd2, 0x7e, 0xc8, 0x34, 0xf7, 0x86, 0x5c, 0xfa, - 0x5c, 0x1d, 0x8a, 0x59, 0x64, 0xee, 0xb6, 0x46, 0x6f, 0xb4, 0xb9, 0xbf, 0x5b, 0x40, 0x4c, 0xe3, - 0x1d, 0x9f, 0x9d, 0x9d, 0x16, 0xcd, 0xf7, 0x3a, 0xac, 0x8f, 0x53, 0xf4, 0xbc, 0x68, 0x41, 0xda, - 0x42, 0xe0, 0x4b, 0x3e, 0x27, 0x0e, 0xb4, 0xcd, 0x5c, 0x3e, 0x1f, 0x0b, 0x8f, 0x63, 0xad, 0xea, - 0x14, 0x0c, 0x74, 0x28, 0x3c, 0x4e, 0x3e, 0x84, 0xe6, 0x34, 0x1b, 0x80, 0xd5, 0x57, 0x07, 0xe0, - 0x2a, 0x9c, 0x99, 0x78, 0x34, 0x27, 0x13, 0x02, 0xb5, 0x91, 0xf0, 0xe6, 0x58, 0xab, 0x5b, 0x14, - 0xd7, 0xee, 0xc7, 0xd0, 0x79, 0x59, 0x90, 0xf2, 0xa2, 0xe2, 0xbf, 0x87, 0xe2, 0x9a, 0x6c, 0x43, - 0x1d, 0x5f, 0x29, 0xa6, 0xb3, 0x4e, 0xcd, 0x66, 0x70, 0x74, 0x79, 0x65, 0x57, 0x9e, 0x5e, 0xd9, - 0x95, 0xe7, 0x57, 0xb6, 0xf5, 0xf3, 0xc2, 0xb6, 0xfe, 0x58, 0xd8, 0xd6, 0x93, 0x85, 0x6d, 0x5d, - 0x2e, 0x6c, 0xeb, 0xcf, 0x85, 0x6d, 0xfd, 0xbd, 0xb0, 0x2b, 0xcf, 0x17, 0xb6, 0xf5, 0xdb, 0xb5, - 0x5d, 0xb9, 0xbc, 0xb6, 0x2b, 0x4f, 0xaf, 0xed, 0xca, 0x0f, 0x9b, 0x98, 0x6d, 0xe8, 0x7b, 0x5e, - 0xc0, 0x1f, 0x31, 0xc9, 0x47, 0x0d, 0x6c, 0xd7, 0xf7, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xab, - 0x90, 0x68, 0x82, 0x9e, 0x07, 0x00, 0x00, + // 1061 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xc6, 0xff, 0x9f, 0xd3, 0xc4, 0x9a, 0x44, 0xe0, 0x04, 0xd8, 0xb5, 0x56, 0x1c, 0x02, + 0xb4, 0x0e, 0x84, 0x3f, 0x07, 0x44, 0x11, 0x75, 0x1a, 0x94, 0x40, 0x0b, 0x61, 0x1c, 0x81, 0xc4, + 0x25, 0x1a, 0x7b, 0x27, 0xf6, 0xd2, 0xdd, 0x9d, 0x65, 0x66, 0xdc, 0xd6, 0x37, 0xc4, 0x15, 0x09, + 0xf1, 0x09, 0x38, 0xf3, 0x51, 0x7a, 0xcc, 0xb1, 0xea, 0x61, 0x45, 0x1c, 0x21, 0xa1, 0x3d, 0xf5, + 0x03, 0x70, 0x40, 0x3b, 0xb3, 0xff, 0xd2, 0x46, 0x88, 0x8b, 0x3d, 0xef, 0xf7, 0x7e, 0xbf, 0x37, + 0x6f, 0xde, 0xbc, 0x79, 0x0b, 0x1d, 0x9f, 0x39, 0xd4, 0x1b, 0x84, 0x9c, 0x49, 0x86, 0xe0, 0xc7, + 0x39, 0xe5, 0x0b, 0x4e, 0x82, 0x29, 0xdd, 0xbe, 0x35, 0x75, 0xe5, 0x6c, 0x3e, 0x1e, 0x4c, 0x98, + 0xbf, 0x3b, 0x65, 0x53, 0xb6, 
0xab, 0x28, 0xe3, 0xf9, 0x99, 0xb2, 0x94, 0xa1, 0x56, 0x5a, 0xba, + 0xfd, 0x6e, 0x99, 0xce, 0xc9, 0x19, 0x09, 0xc8, 0xae, 0xef, 0xfa, 0x2e, 0xdf, 0x0d, 0x1f, 0x4c, + 0xf5, 0x2a, 0x1c, 0xeb, 0xff, 0x54, 0xb1, 0x35, 0x65, 0x6c, 0xea, 0xd1, 0x22, 0x2e, 0x09, 0x16, + 0xda, 0x65, 0xdf, 0x83, 0xee, 0x31, 0x67, 0x3e, 0x95, 0x33, 0x3a, 0x17, 0x87, 0x94, 0x38, 0x94, + 0xa3, 0x2d, 0xa8, 0x7d, 0x45, 0x7c, 0xda, 0x33, 0xfa, 0xc6, 0x4e, 0x7b, 0x58, 0x8f, 0x23, 0xcb, + 0xb8, 0x85, 0x15, 0x84, 0xde, 0x80, 0xc6, 0xb7, 0xc4, 0x9b, 0x53, 0xd1, 0x5b, 0xe9, 0x57, 0x0b, + 0x67, 0x0a, 0xda, 0xff, 0xac, 0x00, 0x2a, 0xc2, 0x61, 0x2a, 0x42, 0x16, 0x08, 0x8a, 0x6c, 0x68, + 0x8c, 0x24, 0x91, 0x73, 0x91, 0x86, 0x84, 0x38, 0xb2, 0x1a, 0x42, 0x21, 0x38, 0xf5, 0xa0, 0x21, + 0xd4, 0xee, 0x12, 0x49, 0x7a, 0x2b, 0x7d, 0x63, 0xa7, 0xb3, 0xb7, 0x3d, 0x28, 0xea, 0x33, 0x28, + 0x22, 0x26, 0x8c, 0x21, 0x8a, 0x23, 0x6b, 0xcd, 0x21, 0x92, 0xdc, 0x64, 0xbe, 0x2b, 0xa9, 0x1f, + 0xca, 0x05, 0x56, 0x5a, 0xf4, 0x21, 0xb4, 0x0f, 0x38, 0x67, 0xfc, 0x64, 0x11, 0xd2, 0x5e, 0x55, + 0x6d, 0xf5, 0x6a, 0x1c, 0x59, 0x1b, 0x34, 0x03, 0x4b, 0x8a, 0x82, 0x89, 0xde, 0x82, 0xba, 0x32, + 0x7a, 0x35, 0x25, 0xd9, 0x88, 0x23, 0x6b, 0x5d, 0x49, 0x4a, 0x74, 0xcd, 0x40, 0xb7, 0xa1, 0xa9, + 0x8b, 0x24, 0x7a, 0xf5, 0x7e, 0x75, 0xa7, 0xb3, 0xf7, 0xfa, 0xf5, 0x89, 0x6a, 0x52, 0x56, 0x9e, + 0x4c, 0x83, 0xf6, 0xa0, 0xf5, 0x1d, 0xe1, 0x81, 0x1b, 0x4c, 0x45, 0xaf, 0xa1, 0x0a, 0xf8, 0x4a, + 0x1c, 0x59, 0xe8, 0x51, 0x8a, 0x95, 0xf6, 0xcb, 0x79, 0x49, 0x76, 0x47, 0xc1, 0x19, 0x13, 0xbd, + 0xa6, 0x12, 0xa8, 0xec, 0xdc, 0x04, 0x28, 0x67, 0xa7, 0x18, 0xf6, 0xcf, 0x06, 0xac, 0x5d, 0x2d, + 0x16, 0x1a, 0x00, 0x60, 0x2a, 0xe6, 0x9e, 0x54, 0x35, 0xd1, 0xe5, 0x5f, 0x8b, 0x23, 0x0b, 0x78, + 0x8e, 0xe2, 0x12, 0x03, 0x7d, 0x06, 0x0d, 0x6d, 0xa9, 0x0b, 0xee, 0xec, 0xf5, 0xca, 0xe7, 0x1b, + 0x11, 0x3f, 0xf4, 0xe8, 0x48, 0x72, 0x4a, 0xfc, 0xe1, 0xda, 0x93, 0xc8, 0xaa, 0x24, 0x17, 0xa9, + 0x23, 0xe1, 0x54, 0x67, 0xff, 0xba, 0x02, 0xab, 0x65, 0x22, 0x0a, 0xa1, 0xe1, 0x91, 0x31, 0xf5, + 0x92, 0xdb, 0x4f, 0x42, 0x6e, 0x0c, 0x26, 0x8c, 0x4b, 0xfa, 0x38, 0x1c, 0x0f, 0xee, 0x25, 0xf8, + 0x31, 0x71, 0xf9, 0x70, 0x3f, 0x89, 0xf6, 0x2c, 0xb2, 0xde, 0xfb, 0x3f, 0xcd, 0xad, 0x75, 0x77, + 0x1c, 0x12, 0x4a, 0xca, 0x93, 0x14, 0x7c, 0x2a, 0xb9, 0x3b, 0xc1, 0xe9, 0x3e, 0xe8, 0x63, 0x68, + 0x0a, 0x95, 0x81, 0x48, 0x4f, 0xd1, 0x2d, 0xb6, 0xd4, 0xa9, 0x15, 0xd9, 0x3f, 0x54, 0x9d, 0x8b, + 0x33, 0x01, 0x3a, 0x06, 0x98, 0xb9, 0x42, 0xb2, 0x29, 0x27, 0xbe, 0xe8, 0x55, 0xd3, 0x4b, 0xce, + 0xe5, 0x9f, 0x7b, 0x8c, 0xc8, 0xc3, 0x8c, 0xa0, 0x52, 0x47, 0x69, 0xa8, 0x92, 0x0e, 0x97, 0xd6, + 0xf6, 0x2f, 0x06, 0x74, 0xf6, 0xc9, 0x64, 0x46, 0x1d, 0xdd, 0x43, 0x5b, 0x50, 0x7d, 0x40, 0x17, + 0xe9, 0x5d, 0x34, 0xe3, 0xc8, 0x4a, 0x4c, 0x9c, 0xfc, 0xa0, 0x77, 0xa0, 0x9d, 0xf7, 0xaa, 0x7a, + 0x09, 0xed, 0xe1, 0x8d, 0x38, 0xb2, 0x0a, 0x10, 0x17, 0x4b, 0xf4, 0x01, 0xac, 0x2a, 0xe3, 0x3e, + 0x15, 0x82, 0x4c, 0xb3, 0x86, 0xef, 0xc6, 0x91, 0x75, 0x05, 0xc7, 0x57, 0x2c, 0xfb, 0x07, 0x58, + 0xd3, 0xc9, 0xe4, 0xaf, 0xf3, 0x3f, 0xf2, 0xb9, 0x0d, 0x4d, 0xfa, 0x58, 0xd2, 0x40, 0x66, 0x85, + 0x44, 0xe5, 0x76, 0x38, 0x50, 0xae, 0xe1, 0x7a, 0x7a, 0xfe, 0x8c, 0x8a, 0xb3, 0x85, 0xfd, 0xcc, + 0x80, 0x86, 0x26, 0x21, 0x0b, 0xea, 0x42, 0x12, 0x2e, 0xd5, 0x36, 0xd5, 0x61, 0x3b, 0x8e, 0x2c, + 0x0d, 0x60, 0xfd, 0x97, 0x64, 0x41, 0x03, 0x47, 0x1d, 0xba, 0xaa, 0xb3, 0xa0, 0x81, 0x83, 0x93, + 0x1f, 0xd4, 0x87, 0x96, 0xe4, 0x64, 0x42, 0x4f, 0x5d, 0x27, 0x7d, 0xa2, 0xd9, 0xbb, 0x52, 0xf0, + 0x91, 0x83, 0x3e, 0x85, 0x16, 0x4f, 0x8f, 0xd3, 0xab, 
0xab, 0x01, 0xb2, 0x39, 0xd0, 0x33, 0x6f, + 0x90, 0xcd, 0xbc, 0xc1, 0x9d, 0x60, 0x31, 0x5c, 0x8d, 0x23, 0x2b, 0x67, 0xe2, 0x7c, 0x85, 0x6e, + 0x02, 0x52, 0xe7, 0x3a, 0x95, 0xae, 0x4f, 0x85, 0x24, 0x7e, 0x78, 0xea, 0x27, 0x2f, 0xd4, 0xd8, + 0xa9, 0xe2, 0xae, 0xf2, 0x9c, 0x64, 0x8e, 0xfb, 0xe2, 0x8b, 0x5a, 0xab, 0xda, 0xad, 0xd9, 0x7f, + 0x19, 0xd0, 0xfc, 0x3a, 0x94, 0x2e, 0x0b, 0x04, 0x7a, 0x13, 0x6e, 0xa8, 0xa2, 0xde, 0x75, 0x05, + 0x19, 0x7b, 0xd4, 0x51, 0xa7, 0x6c, 0xe1, 0xab, 0x20, 0x7a, 0x1b, 0xba, 0xa3, 0x19, 0xe1, 0x8e, + 0x1b, 0x4c, 0x73, 0xe2, 0x8a, 0x22, 0xbe, 0x84, 0xa3, 0x3e, 0x74, 0x4e, 0x98, 0x24, 0x9e, 0x72, + 0x08, 0x75, 0xb7, 0x75, 0x5c, 0x86, 0xd0, 0x1e, 0x6c, 0x1e, 0x05, 0x42, 0x92, 0x40, 0x8e, 0x42, + 0xcf, 0x95, 0x79, 0xc4, 0x9a, 0x8a, 0x78, 0xad, 0xef, 0x45, 0xcd, 0x51, 0x20, 0x29, 0x7f, 0x48, + 0x3c, 0x55, 0xb3, 0x2a, 0xbe, 0xd6, 0x67, 0x1f, 0xc0, 0xfa, 0x37, 0x49, 0x05, 0x92, 0x39, 0xed, + 0x0a, 0xe9, 0x4e, 0xd4, 0xd6, 0x07, 0x42, 0xba, 0x3e, 0x91, 0xd4, 0x19, 0x51, 0xee, 0x52, 0xb1, + 0xcf, 0xe6, 0x81, 0xbe, 0xdb, 0x1a, 0xbe, 0xd6, 0x67, 0xff, 0x6e, 0x00, 0xd2, 0x8d, 0x77, 0x78, + 0x72, 0x72, 0x9c, 0x37, 0xdf, 0x6b, 0xd0, 0x9e, 0x24, 0xe8, 0x69, 0xde, 0x82, 0xb8, 0xa5, 0x80, + 0x2f, 0xe9, 0x02, 0x59, 0xd0, 0xd1, 0x5f, 0x89, 0xd3, 0x09, 0x73, 0xf4, 0x83, 0xa8, 0x63, 0xd0, + 0xd0, 0x3e, 0x73, 0x28, 0xfa, 0x08, 0x9a, 0xb3, 0x74, 0x1c, 0x57, 0x5f, 0x1e, 0xc7, 0xc5, 0x76, + 0x7a, 0xfe, 0xe2, 0x8c, 0x8c, 0x10, 0xd4, 0xc6, 0xcc, 0x59, 0xa8, 0x5a, 0xad, 0x62, 0xb5, 0xb6, + 0x3f, 0x81, 0xee, 0x8b, 0x82, 0x84, 0x17, 0xe4, 0x5f, 0x42, 0xac, 0xd6, 0x68, 0x13, 0xea, 0x6a, + 0x66, 0xe8, 0xf7, 0x89, 0xb5, 0x31, 0x3c, 0x38, 0xbf, 0x30, 0x2b, 0x4f, 0x2f, 0xcc, 0xca, 0xf3, + 0x0b, 0xd3, 0xf8, 0x69, 0x69, 0x1a, 0x7f, 0x2c, 0x4d, 0xe3, 0xc9, 0xd2, 0x34, 0xce, 0x97, 0xa6, + 0xf1, 0xe7, 0xd2, 0x34, 0xfe, 0x5e, 0x9a, 0x95, 0xe7, 0x4b, 0xd3, 0xf8, 0xed, 0xd2, 0xac, 0x9c, + 0x5f, 0x9a, 0x95, 0xa7, 0x97, 0x66, 0xe5, 0xfb, 0x75, 0x95, 0xad, 0xef, 0x3a, 0x8e, 0x47, 0x1f, + 0x11, 0x4e, 0xc7, 0x0d, 0xd5, 0xae, 0xef, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x48, 0x21, + 0x5e, 0x2c, 0x08, 0x00, 0x00, } func (this *PrometheusHeader) Equal(that interface{}) bool { @@ -899,6 +962,36 @@ func (this *SampleStream) Equal(that interface{}) bool { } return true } +func (this *CachedError) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CachedError) + if !ok { + that2, ok := that.(CachedError) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Key != that1.Key { + return false + } + if this.ErrorType != that1.ErrorType { + return false + } + if this.ErrorMessage != that1.ErrorMessage { + return false + } + return true +} func (this *CachedResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1164,6 +1257,18 @@ func (this *SampleStream) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *CachedError) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&querymiddleware.CachedError{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + s = append(s, "ErrorType: "+fmt.Sprintf("%#v", this.ErrorType)+",\n") + s = append(s, "ErrorMessage: "+fmt.Sprintf("%#v", this.ErrorMessage)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *CachedResponse) GoString() string { if this == nil { return "nil" @@ -1491,6 +1596,50 @@ func (m 
*SampleStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CachedError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CachedError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CachedError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ErrorMessage) > 0 { + i -= len(m.ErrorMessage) + copy(dAtA[i:], m.ErrorMessage) + i = encodeVarintModel(dAtA, i, uint64(len(m.ErrorMessage))) + i-- + dAtA[i] = 0x1a + } + if len(m.ErrorType) > 0 { + i -= len(m.ErrorType) + copy(dAtA[i:], m.ErrorType) + i = encodeVarintModel(dAtA, i, uint64(len(m.ErrorType))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintModel(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *CachedResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1895,6 +2044,27 @@ func (m *SampleStream) Size() (n int) { return n } +func (m *CachedError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovModel(uint64(l)) + } + l = len(m.ErrorType) + if l > 0 { + n += 1 + l + sovModel(uint64(l)) + } + l = len(m.ErrorMessage) + if l > 0 { + n += 1 + l + sovModel(uint64(l)) + } + return n +} + func (m *CachedResponse) Size() (n int) { if m == nil { return 0 @@ -2095,6 +2265,18 @@ func (this *SampleStream) String() string { }, "") return s } +func (this *CachedError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CachedError{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ErrorType:` + fmt.Sprintf("%v", this.ErrorType) + `,`, + `ErrorMessage:` + fmt.Sprintf("%v", this.ErrorMessage) + `,`, + `}`, + }, "") + return s +} func (this *CachedResponse) String() string { if this == nil { return "nil" @@ -2860,6 +3042,155 @@ func (m *SampleStream) Unmarshal(dAtA []byte) error { } return nil } +func (m *CachedError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CachedError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CachedError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthModel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthModel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthModel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthModel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowModel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthModel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthModel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipModel(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthModel + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthModel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *CachedResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/frontend/querymiddleware/model.proto b/pkg/frontend/querymiddleware/model.proto index bd181763c06..0a4951cfcd5 100644 --- a/pkg/frontend/querymiddleware/model.proto +++ b/pkg/frontend/querymiddleware/model.proto @@ -43,6 +43,12 @@ message SampleStream { repeated cortexpb.FloatHistogramPair histograms = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "histograms"]; } +message CachedError { + string key = 1 [(gogoproto.jsontag) = "key"]; + string errorType = 2 [(gogoproto.jsontag) = "errorType"]; + string errorMessage = 3 [(gogoproto.jsontag) = "errorMessage"]; +} + message CachedResponse { string key = 1 [(gogoproto.jsontag) = "key"]; diff --git a/pkg/frontend/querymiddleware/results_cache.go b/pkg/frontend/querymiddleware/results_cache.go index 9102bdd3a43..ae6d6609332 100644 --- a/pkg/frontend/querymiddleware/results_cache.go +++ b/pkg/frontend/querymiddleware/results_cache.go @@ -12,6 +12,7 @@ import ( "fmt" "hash/fnv" "net/http" + "slices" "sort" "strings" "time" @@ -263,13 +264,12 @@ func isRequestCachable(req MetricsQueryRequest, maxCacheTime int64, cacheUnalign return true, "" } -// isResponseCachable says whether the response should be cached or not. 
-func isResponseCachable(r Response, logger log.Logger) bool { - headerValues := getHeaderValuesWithName(r, cacheControlHeader) - for _, v := range headerValues { - if v == noStoreValue { - level.Debug(logger).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) - return false +// isResponseCachable returns true if a response hasn't explicitly disabled caching +// via an HTTP header, false otherwise. +func isResponseCachable(r Response) bool { + for _, hv := range r.GetHeaders() { + if hv.GetName() == cacheControlHeader { + return !slices.Contains(hv.GetValues(), noStoreValue) } } @@ -331,18 +331,6 @@ func areEvaluationTimeModifiersCachable(r MetricsQueryRequest, maxCacheTime int6 return cachable } -func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) { - for _, hv := range r.GetHeaders() { - if hv.GetName() != headerName { - continue - } - - headerValues = append(headerValues, hv.GetValues()...) - } - - return -} - // mergeCacheExtentsForRequest merges the provided cache extents for the input request and returns merged extents. // The input extents can be overlapping and are not required to be sorted. func mergeCacheExtentsForRequest(ctx context.Context, r MetricsQueryRequest, merger Merger, extents []Extent) ([]Extent, error) { @@ -642,6 +630,6 @@ func cacheHashKey(key string) string { hasher := fnv.New64a() _, _ = hasher.Write([]byte(key)) // This'll never error. - // Hex because memcache errors for the bytes produced by the hash. + // Hex because memcache keys must be non-whitespace non-control ASCII return hex.EncodeToString(hasher.Sum(nil)) } diff --git a/pkg/frontend/querymiddleware/results_cache_test.go b/pkg/frontend/querymiddleware/results_cache_test.go index ab0391a4ef6..cccd7233888 100644 --- a/pkg/frontend/querymiddleware/results_cache_test.go +++ b/pkg/frontend/querymiddleware/results_cache_test.go @@ -363,7 +363,7 @@ func TestIsResponseCachable(t *testing.T) { } { { t.Run(tc.name, func(t *testing.T) { - ret := isResponseCachable(tc.response, log.NewNopLogger()) + ret := isResponseCachable(tc.response) require.Equal(t, tc.expected, ret) }) } diff --git a/pkg/frontend/querymiddleware/roundtrip.go b/pkg/frontend/querymiddleware/roundtrip.go index f6d2ed82bb6..ee465164244 100644 --- a/pkg/frontend/querymiddleware/roundtrip.go +++ b/pkg/frontend/querymiddleware/roundtrip.go @@ -58,6 +58,7 @@ type Config struct { SplitQueriesByInterval time.Duration `yaml:"split_queries_by_interval" category:"advanced"` ResultsCacheConfig `yaml:"results_cache"` CacheResults bool `yaml:"cache_results"` + CacheErrors bool `yaml:"cache_errors" category:"experimental"` MaxRetries int `yaml:"max_retries" category:"advanced"` NotRunningTimeout time.Duration `yaml:"not_running_timeout" category:"advanced"` ShardedQueries bool `yaml:"parallelize_shardable_queries"` @@ -87,6 +88,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.NotRunningTimeout, "query-frontend.not-running-timeout", 2*time.Second, "Maximum time to wait for the query-frontend to become ready before rejecting requests received before the frontend was ready. 0 to disable (i.e. fail immediately if a request is received while the frontend is still starting up)") f.DurationVar(&cfg.SplitQueriesByInterval, "query-frontend.split-queries-by-interval", 24*time.Hour, "Split range queries by an interval and execute in parallel. You should use a multiple of 24 hours to optimize querying blocks. 
0 to disable it.") f.BoolVar(&cfg.CacheResults, "query-frontend.cache-results", false, "Cache query results.") + f.BoolVar(&cfg.CacheErrors, "query-frontend.cache-errors", false, "Cache non-transient errors from queries.") f.BoolVar(&cfg.ShardedQueries, "query-frontend.parallelize-shardable-queries", false, "True to enable query sharding.") f.BoolVar(&cfg.PrunedQueries, "query-frontend.prune-queries", false, "True to enable pruning dead code (eg. expressions that cannot produce any results) and simplifying expressions (eg. expressions that can be evaluated immediately) in queries.") f.Uint64Var(&cfg.TargetSeriesPerShard, "query-frontend.query-sharding-target-series-per-shard", 0, "How many series a single sharded partial query should load at most. This is not a strict requirement guaranteed to be honoured by query sharding, but a hint given to the query sharding when the query execution is initially planned. 0 to disable cardinality-based hints.") @@ -104,7 +106,7 @@ func (cfg *Config) Validate() error { } } - if cfg.CacheResults || cfg.cardinalityBasedShardingEnabled() { + if cfg.CacheResults || cfg.CacheErrors || cfg.cardinalityBasedShardingEnabled() { if err := cfg.ResultsCacheConfig.Validate(); err != nil { return errors.Wrap(err, "invalid query-frontend results cache config") } @@ -337,6 +339,14 @@ func newQueryMiddlewares( newStepAlignMiddleware(limits, log, registerer), ) + if cfg.CacheResults && cfg.CacheErrors { + queryRangeMiddleware = append( + queryRangeMiddleware, + newInstrumentMiddleware("error_caching", metrics), + newErrorCachingMiddleware(cacheClient, limits, resultsCacheEnabledByOption, log, registerer), + ) + } + // Inject the middleware to split requests by interval + results cache (if at least one of the two is enabled). if cfg.SplitQueriesByInterval > 0 || cfg.CacheResults { queryRangeMiddleware = append(queryRangeMiddleware, newInstrumentMiddleware("split_by_interval_and_results_cache", metrics), newSplitAndCacheMiddleware( diff --git a/pkg/frontend/querymiddleware/split_and_cache.go b/pkg/frontend/querymiddleware/split_and_cache.go index 629591e364f..83393f7113f 100644 --- a/pkg/frontend/querymiddleware/split_and_cache.go +++ b/pkg/frontend/querymiddleware/split_and_cache.go @@ -262,7 +262,7 @@ func (s *splitAndCacheMiddleware) Do(ctx context.Context, req MetricsQueryReques for downstreamIdx, downstreamReq := range splitReq.downstreamRequests { downstreamRes := splitReq.downstreamResponses[downstreamIdx] - if !isResponseCachable(downstreamRes, s.logger) { + if !isResponseCachable(downstreamRes) { continue } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 19cfccbdbe0..d3a89abf11e 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -167,6 +167,7 @@ type Limits struct { ResultsCacheTTLForOutOfOrderTimeWindow model.Duration `yaml:"results_cache_ttl_for_out_of_order_time_window" json:"results_cache_ttl_for_out_of_order_time_window"` ResultsCacheTTLForCardinalityQuery model.Duration `yaml:"results_cache_ttl_for_cardinality_query" json:"results_cache_ttl_for_cardinality_query"` ResultsCacheTTLForLabelsQuery model.Duration `yaml:"results_cache_ttl_for_labels_query" json:"results_cache_ttl_for_labels_query"` + ResultsCacheTTLForErrors model.Duration `yaml:"results_cache_ttl_for_errors" json:"results_cache_ttl_for_errors" category:"experimental"` ResultsCacheForUnalignedQueryEnabled bool `yaml:"cache_unaligned_requests" json:"cache_unaligned_requests" category:"advanced"` MaxQueryExpressionSizeBytes int 
`yaml:"max_query_expression_size_bytes" json:"max_query_expression_size_bytes"` BlockedQueries []*BlockedQuery `yaml:"blocked_queries,omitempty" json:"blocked_queries,omitempty" doc:"nocli|description=List of queries to block." category:"experimental"` @@ -347,6 +348,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Var(&l.ResultsCacheTTLForOutOfOrderTimeWindow, resultsCacheTTLForOutOfOrderWindowFlag, fmt.Sprintf("Time to live duration for cached query results if query falls into out-of-order time window. This is lower than -%s so that incoming out-of-order samples are returned in the query results sooner.", resultsCacheTTLFlag)) f.Var(&l.ResultsCacheTTLForCardinalityQuery, "query-frontend.results-cache-ttl-for-cardinality-query", "Time to live duration for cached cardinality query results. The value 0 disables the cache.") f.Var(&l.ResultsCacheTTLForLabelsQuery, "query-frontend.results-cache-ttl-for-labels-query", "Time to live duration for cached label names and label values query results. The value 0 disables the cache.") + _ = l.ResultsCacheTTLForErrors.Set("5m") + f.Var(&l.ResultsCacheTTLForErrors, "query-frontend.results-cache-ttl-for-errors", "Time to live duration for cached non-transient errors") f.BoolVar(&l.ResultsCacheForUnalignedQueryEnabled, "query-frontend.cache-unaligned-requests", false, "Cache requests that are not step-aligned.") f.IntVar(&l.MaxQueryExpressionSizeBytes, MaxQueryExpressionSizeBytesFlag, 0, "Max size of the raw query, in bytes. This limit is enforced by the query-frontend for instant, range and remote read queries. 0 to not apply a limit to the size of the query.") f.BoolVar(&l.AlignQueriesWithStep, alignQueriesWithStepFlag, false, "Mutate incoming queries to align their start and end with their step to improve result caching.") @@ -1048,6 +1051,10 @@ func (o *Overrides) ResultsCacheTTLForLabelsQuery(user string) time.Duration { return time.Duration(o.getOverridesForUser(user).ResultsCacheTTLForLabelsQuery) } +func (o *Overrides) ResultsCacheTTLForErrors(user string) time.Duration { + return time.Duration(o.getOverridesForUser(user).ResultsCacheTTLForErrors) +} + func (o *Overrides) ResultsCacheForUnalignedQueryEnabled(userID string) bool { return o.getOverridesForUser(userID).ResultsCacheForUnalignedQueryEnabled }
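
Usage sketch (not part of the change): the snippet below shows how the new experimental options could be enabled, assuming the usual `frontend` and `limits` configuration blocks and a hypothetical memcached address. Per the `newQueryMiddlewares` change above, the error-caching middleware is only installed when `cache_results` is enabled in addition to `cache_errors`, and cached errors are stored in the same results cache backend.

```yaml
# Hypothetical example: enable experimental caching of non-transient query errors.
frontend:
  cache_results: true   # error caching is only wired up when results caching is also on
  cache_errors: true    # experimental: -query-frontend.cache-errors
  results_cache:
    backend: memcached
    memcached:
      addresses: dns+memcached.example.svc.cluster.local:11211  # example address

limits:
  # TTL for cached non-transient errors (default 5m); can be overridden per tenant,
  # and the minimum across tenants of a request is applied.
  results_cache_ttl_for_errors: 5m
```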