
Deprecate cache interval #2040

Merged · 9 commits · Jan 30, 2020
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -2,7 +2,7 @@

## master / unreleased

-* [CHANGE] Removed unnecessary `frontend.cache-split-interval` in favor of `querier.split-queries-by-interval` both to reduce configuration complexity and guarantee alignment of these two configs. #2040
+* [CHANGE] Removed unnecessary `frontend.cache-split-interval` in favor of `querier.split-queries-by-interval` both to reduce configuration complexity and guarantee alignment of these two configs. Starting from now, `-querier.cache-results` may only be enabled in conjunction with `-querier.split-queries-by-interval` (previously the cache interval default was `24h`, so if you want to preserve the same behaviour you should set `-querier.split-queries-by-interval=24h`). #2040
* [CHANGE] Removed remaining support for using denormalised tokens in the ring. If you're still running ingesters with denormalised tokens (Cortex 0.4 or earlier, with `-ingester.normalise-tokens=false`), such ingesters will now be completely invisible to distributors and need to be either switched to Cortex 0.6.0 or later, or be configured to use normalised tokens. #2034
* [CHANGE] Moved `--store.min-chunk-age` to the Querier config as `--querier.query-store-after`, allowing the store to be skipped during query time if the metrics wouldn't be found. The YAML config option `ingestermaxquerylookback` has been renamed to `query_ingesters_within` to match its CLI flag. #1893
* `--store.min-chunk-age` has been removed
17 changes: 10 additions & 7 deletions pkg/querier/queryrange/results_cache.go
@@ -58,11 +58,20 @@ func (e ExtractorFunc) Extract(start, end int64, from Response) Response {
}

// CacheSplitter generates cache keys. This is a useful interface for downstream
-// consumers who wish to impl their own strategies.
+// consumers who wish to implement their own strategies.
type CacheSplitter interface {
GenerateCacheKey(userID string, r Request) string
}

+// constSplitter is a utility for using a constant split interval when determining cache keys
+type constSplitter time.Duration
+
+// GenerateCacheKey generates a cache key based on the userID, Request and interval.
+func (t constSplitter) GenerateCacheKey(userID string, r Request) string {
+	currentInterval := r.GetStart() / int64(time.Duration(t)/time.Millisecond)
+	return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
+}

// PrometheusResponseExtractor is an `Extractor` for a Prometheus query range response.
var PrometheusResponseExtractor = ExtractorFunc(func(start, end int64, from Response) Response {
promRes := from.(*PrometheusResponse)
@@ -376,12 +385,6 @@ func (s resultsCache) filterRecentExtents(req Request, extents []Extent) ([]Extent, error) {
return extents, nil
}

-// generateKey generates a cache key based on the userID, Request and interval.
-func generateKey(userID string, r Request, interval time.Duration) string {
-	currentInterval := r.GetStart() / int64(interval/time.Millisecond)
-	return fmt.Sprintf("%s:%s:%d:%d", userID, r.GetQuery(), r.GetStep(), currentInterval)
-}

func (s resultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
found, bufs, _ := s.cache.Fetch(ctx, []string{cache.HashKey(key)})
if len(found) != 1 {
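The `CacheSplitter` interface above is the extension point this PR introduces: downstream consumers can supply their own key strategy instead of the built-in `constSplitter`. Below is a hedged, self-contained sketch of such a strategy — the `request` interface, `prefixedSplitter`, and `fakeRequest` types are illustrative stand-ins, not code from this PR; only the `GetStart`/`GetQuery`/`GetStep` accessors and the `user:query:step:interval` key layout are taken from the diff.

```go
// Hedged sketch, not part of the PR: a custom splitter that reuses the same
// constant-interval bucketing as constSplitter but namespaces keys with a
// static prefix (e.g. an environment name).
package main

import (
	"fmt"
	"time"
)

// request is a local stand-in for the queryrange Request interface,
// exposing only the methods the cache key actually needs.
type request interface {
	GetStart() int64 // start time in milliseconds
	GetQuery() string
	GetStep() int64
}

// prefixedSplitter buckets requests into fixed intervals, like constSplitter,
// and prepends a static prefix to every key.
type prefixedSplitter struct {
	prefix   string
	interval time.Duration
}

// GenerateCacheKey mirrors constSplitter's arithmetic: requests whose start
// times fall into the same interval share a bucket, and therefore a key.
func (s prefixedSplitter) GenerateCacheKey(userID string, r request) string {
	bucket := r.GetStart() / int64(s.interval/time.Millisecond)
	return fmt.Sprintf("%s:%s:%s:%d:%d", s.prefix, userID, r.GetQuery(), r.GetStep(), bucket)
}

// fakeRequest satisfies the local request interface for the demo.
type fakeRequest struct {
	start int64
	query string
	step  int64
}

func (f fakeRequest) GetStart() int64  { return f.start }
func (f fakeRequest) GetQuery() string { return f.query }
func (f fakeRequest) GetStep() int64   { return f.step }

func main() {
	s := prefixedSplitter{prefix: "prod", interval: 24 * time.Hour}
	r := fakeRequest{start: 1580380200000, query: "up", step: 60000}
	fmt.Println(s.GenerateCacheKey("tenant-1", r))
	// Output: prod:tenant-1:up:60000:18291
}
```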
8 changes: 4 additions & 4 deletions pkg/querier/queryrange/results_cache_test.go
@@ -274,7 +274,7 @@ func TestResultsCache(t *testing.T) {
rcm, _, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
cfg,
-Config{SplitQueriesByInterval: 24 * time.Hour},
+constSplitter(24*time.Hour),
fakeLimits{},
PrometheusCodec,
PrometheusResponseExtractor,
@@ -308,7 +308,7 @@ func TestResultsCacheRecent(t *testing.T) {
var cfg ResultsCacheConfig
flagext.DefaultValues(&cfg)
cfg.CacheConfig.Cache = cache.NewMockCache()
-rcm, _, err := NewResultsCacheMiddleware(log.NewNopLogger(), cfg, Config{SplitQueriesByInterval: 24 * time.Hour}, fakeLimits{}, PrometheusCodec, PrometheusResponseExtractor)
+rcm, _, err := NewResultsCacheMiddleware(log.NewNopLogger(), cfg, constSplitter(24*time.Hour), fakeLimits{}, PrometheusCodec, PrometheusResponseExtractor)
require.NoError(t, err)

req := parsedRequest.WithStartEnd(int64(model.Now())-(60*1e3), int64(model.Now()))
@@ -343,7 +343,7 @@ func Test_resultsCache_MissingData(t *testing.T) {
rm, _, err := NewResultsCacheMiddleware(
log.NewNopLogger(),
cfg,
-Config{SplitQueriesByInterval: 24 * time.Hour},
+constSplitter(24*time.Hour),
fakeLimits{},
PrometheusCodec,
PrometheusResponseExtractor,
@@ -398,7 +398,7 @@ func Test_generateKey(t *testing.T) {
}
for _, tt := range tests {
t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) {
if got := generateKey("fake", tt.r, tt.interval); got != tt.want {
if got := constSplitter(tt.interval).GenerateCacheKey("fake", tt.r); got != tt.want {
t.Errorf("generateKey() = %v, want %v", got, tt.want)
}
})
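`Test_generateKey` (now exercised through `constSplitter.GenerateCacheKey`) checks the bucketing that keeps the cache key stable within a split interval. As a quick standalone illustration — not test code from this PR — with a 24h split, start times inside the same day map to the same interval number, and the number only advances once the start crosses the boundary:

```go
// Standalone illustration (not from the PR) of the interval arithmetic behind
// the cache key: same-day start times share a bucket; crossing the 24h
// boundary moves to the next bucket, and hence to a different cache key.
package main

import (
	"fmt"
	"time"
)

// bucket reproduces the start/interval division used by constSplitter.
func bucket(startMs int64, split time.Duration) int64 {
	return startMs / int64(split/time.Millisecond)
}

func main() {
	split := 24 * time.Hour
	day := int64(split / time.Millisecond) // 86_400_000 ms

	startOfDay := int64(0)           // beginning of day 0
	sixHoursIn := int64(6 * 3600000) // six hours later, still day 0
	nextDay := day                   // exactly one split later: day 1

	fmt.Println(bucket(startOfDay, split), bucket(sixHoursIn, split), bucket(nextDay, split))
	// Output: 0 0 1
}
```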
7 changes: 1 addition & 6 deletions pkg/querier/queryrange/roundtrip.go
@@ -64,11 +64,6 @@ func (cfg *Config) Validate() error {
return nil
}

-// GenerateCacheKey impls CacheSplitter
-func (cfg Config) GenerateCacheKey(userID string, r Request) string {
-	return generateKey(userID, r, cfg.SplitQueriesByInterval)
-}

// HandlerFunc is like http.HandlerFunc, but for Handler.
type HandlerFunc func(context.Context, Request) (Response, error)

@@ -121,7 +116,7 @@ func NewTripperware(cfg Config, log log.Logger, limits Limits, codec Codec, cach
}
var c cache.Cache
if cfg.CacheResults {
-queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, cfg, limits, codec, cacheExtractor)
+queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor)
if err != nil {
return nil, nil, err
}
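The body of `Validate` is collapsed in the diff above, but per the CHANGELOG entry `-querier.cache-results` may now only be enabled together with `-querier.split-queries-by-interval`. The sketch below is an assumption about what such a check could look like; the `config` struct is a local stand-in rather than the real `queryrange.Config`, and none of this code is copied from the PR.

```go
// Hedged sketch (assumption): the kind of check Validate could perform now
// that results caching requires a split interval to be configured.
package main

import (
	"errors"
	"fmt"
	"time"
)

// config mirrors just the two fields involved; it is not the real Config type.
type config struct {
	CacheResults           bool
	SplitQueriesByInterval time.Duration
}

// validate rejects the combination the CHANGELOG now forbids:
// caching enabled without a split interval.
func (c config) validate() error {
	if c.CacheResults && c.SplitQueriesByInterval == 0 {
		return errors.New("-querier.cache-results may only be enabled in conjunction with -querier.split-queries-by-interval")
	}
	return nil
}

func main() {
	fmt.Println(config{CacheResults: true}.validate())                                         // error: split interval required
	fmt.Println(config{CacheResults: true, SplitQueriesByInterval: 24 * time.Hour}.validate()) // <nil>
}
```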