From 6005fdb1dc0c8550d0b91d4ae28d781aa24ec2b1 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 11:00:48 +0530 Subject: [PATCH 01/14] embedded cache: enabled if not other cache is configured. --- cmd/migrate/main.go | 4 - pkg/loki/config_wrapper.go | 19 ++--- pkg/loki/config_wrapper_test.go | 78 +++++++++---------- .../queryrangebase/results_cache.go | 7 +- pkg/querier/queryrange/roundtrip_test.go | 24 +++--- pkg/storage/chunk/cache/cache.go | 40 ++-------- pkg/storage/config/store.go | 6 +- pkg/storage/factory.go | 3 - 8 files changed, 67 insertions(+), 114 deletions(-) diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go index e70f0359a3dbd..5eb6fc994dc90 100644 --- a/cmd/migrate/main.go +++ b/cmd/migrate/main.go @@ -76,17 +76,13 @@ func main() { // This is a little brittle, if we add a new cache it may easily get missed here but it's important to disable // any of the chunk caches to save on memory because we write chunks to the cache when we call Put operations on the store. - sourceConfig.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache = false sourceConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient sourceConfig.ChunkStoreConfig.ChunkCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.Redis - sourceConfig.ChunkStoreConfig.WriteDedupeCacheConfig.EnableFifoCache = false sourceConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient sourceConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis - destConfig.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache = false destConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient destConfig.ChunkStoreConfig.ChunkCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.Redis - destConfig.ChunkStoreConfig.WriteDedupeCacheConfig.EnableFifoCache = false destConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient destConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index ab05943a37afd..b477cb387d01a 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -120,7 +120,7 @@ func (c *ConfigWrapper) ApplyDynamicConfig() cfg.Source { betterTSDBShipperDefaults(r, &defaults, r.SchemaConfig.Configs[i]) } - applyFIFOCacheConfig(r) + applyEmbeddedCacheConfig(r) applyIngesterFinalSleep(r) applyIngesterReplicationFactor(r) applyChunkRetain(r, &defaults) @@ -557,23 +557,18 @@ func betterTSDBShipperDefaults(cfg, defaults *ConfigWrapper, period config.Perio } } -// applyFIFOCacheConfig turns on FIFO cache for the chunk store, for the query range results, -// and for the index stats results, but only if no other cache storage is configured (redis or memcache). -// This behavior is only applied for the chunk store cache, for the query range results cache, and for -// the index stats results (i.e: not applicable for the index queries cache or for the write dedupe cache). -func applyFIFOCacheConfig(r *ConfigWrapper) { +// applyEmbeddedCacheConfig turns on Embedded cache for the chunk store, query range results, +// index stats and volume results only if no other cache storage is configured (redis or memcache). 
+// Not applicable for the index queries cache or for the write dedupe cache. +func applyEmbeddedCacheConfig(r *ConfigWrapper) { chunkCacheConfig := r.ChunkStoreConfig.ChunkCacheConfig if !cache.IsCacheConfigured(chunkCacheConfig) { - r.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache = true + r.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled = true } resultsCacheConfig := r.QueryRange.ResultsCacheConfig.CacheConfig if !cache.IsCacheConfigured(resultsCacheConfig) { - r.QueryRange.ResultsCacheConfig.CacheConfig.EnableFifoCache = true - // The query results fifocache is still in Cortex so we couldn't change the flag defaults - // so instead we will override them here. - r.QueryRange.ResultsCacheConfig.CacheConfig.Fifocache.MaxSizeBytes = "1GB" - r.QueryRange.ResultsCacheConfig.CacheConfig.Fifocache.TTL = 1 * time.Hour + r.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled = true } indexStatsCacheConfig := r.QueryRange.StatsCacheConfig.CacheConfig diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index 3f54adda7ebaa..5f0a534695f67 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -863,9 +863,9 @@ query_range: memcached_client: host: memcached.host.org` -func TestDefaultFIFOCacheBehavior(t *testing.T) { +func TestDefaultEmbeddedCacheBehavior(t *testing.T) { t.Run("for the chunk cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- chunk_store_config: chunk_cache_config: @@ -874,10 +874,10 @@ chunk_store_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "endpoint.redis.org", config.ChunkStoreConfig.ChunkCacheConfig.Redis.Endpoint) - assert.False(t, config.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache) + assert.False(t, config.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- chunk_store_config: chunk_cache_config: @@ -886,17 +886,17 @@ chunk_store_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "host.memcached.org", config.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient.Host) - assert.False(t, config.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache) + assert.False(t, config.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled) }) - t.Run("FIFO cache is enabled by default if no other cache is set", func(t *testing.T) { + t.Run("embedded cache is enabled by default if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.True(t, config.ChunkStoreConfig.ChunkCacheConfig.EnableFifoCache) + assert.True(t, config.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled) }) }) t.Run("for the write dedupe cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- chunk_store_config: write_dedupe_cache_config: @@ -905,10 +905,10 @@ chunk_store_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "endpoint.redis.org", config.ChunkStoreConfig.WriteDedupeCacheConfig.Redis.Endpoint) - 
assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EnableFifoCache) + assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- chunk_store_config: write_dedupe_cache_config: @@ -917,17 +917,17 @@ chunk_store_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "host.memcached.org", config.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient.Host) - assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EnableFifoCache) + assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache is enabled by default even if no other cache is set", func(t *testing.T) { + t.Run("no embedded cache is enabled by default even if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EnableFifoCache) + assert.False(t, config.ChunkStoreConfig.WriteDedupeCacheConfig.EmbeddedCache.Enabled) }) }) t.Run("for the index queries cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- storage_config: index_queries_cache_config: @@ -936,10 +936,10 @@ storage_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "endpoint.redis.org", config.StorageConfig.IndexQueriesCacheConfig.Redis.Endpoint) - assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EnableFifoCache) + assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- storage_config: index_queries_cache_config: @@ -949,17 +949,17 @@ storage_config: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "host.memcached.org", config.StorageConfig.IndexQueriesCacheConfig.MemcacheClient.Host) - assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EnableFifoCache) + assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache is enabled by default even if no other cache is set", func(t *testing.T) { + t.Run("no embedded cache is enabled by default even if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EnableFifoCache) + assert.False(t, config.StorageConfig.IndexQueriesCacheConfig.EmbeddedCache.Enabled) }) }) t.Run("for the query range results cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- query_range: results_cache: @@ -969,23 +969,23 @@ query_range: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, config.QueryRange.ResultsCacheConfig.CacheConfig.Redis.Endpoint, "endpoint.redis.org") - assert.False(t, 
config.QueryRange.ResultsCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) assert.EqualValues(t, "memcached.host.org", config.QueryRange.ResultsCacheConfig.CacheConfig.MemcacheClient.Host) - assert.False(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("FIFO cache is enabled by default if no other cache is set", func(t *testing.T) { + t.Run("embedded cache is enabled by default if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.True(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EnableFifoCache) + assert.True(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) }) t.Run("for the index stats results cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- query_range: index_stats_results_cache: @@ -995,10 +995,10 @@ query_range: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, config.QueryRange.StatsCacheConfig.CacheConfig.Redis.Endpoint, "endpoint.redis.org") - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- query_range: index_stats_results_cache: @@ -1008,23 +1008,23 @@ query_range: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "memcached.host.org", config.QueryRange.StatsCacheConfig.CacheConfig.MemcacheClient.Host) - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("FIFO cache is enabled by default if no other cache is set", func(t *testing.T) { + t.Run("embedded cache is enabled by default if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.True(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache) + assert.True(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) t.Run("gets results cache config if not configured directly", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) assert.EqualValues(t, "memcached.host.org", config.QueryRange.StatsCacheConfig.CacheConfig.MemcacheClient.Host) - assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.StatsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) }) t.Run("for the volume results cache config", func(t *testing.T) { - t.Run("no FIFO cache enabled by default if Redis is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Redis is set", func(t *testing.T) { configFileString := `--- query_range: 
volume_results_cache: @@ -1034,10 +1034,10 @@ query_range: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, config.QueryRange.VolumeCacheConfig.CacheConfig.Redis.Endpoint, "endpoint.redis.org") - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("no FIFO cache enabled by default if Memcache is set", func(t *testing.T) { + t.Run("no embedded cache enabled by default if Memcache is set", func(t *testing.T) { configFileString := `--- query_range: volume_results_cache: @@ -1047,18 +1047,18 @@ query_range: config, _, _ := configWrapperFromYAML(t, configFileString, nil) assert.EqualValues(t, "memcached.host.org", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host) - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) - t.Run("FIFO cache is enabled by default if no other cache is set", func(t *testing.T) { + t.Run("embedded cache is enabled by default if no other cache is set", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.True(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache) + assert.True(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) t.Run("gets results cache config if not configured directly", func(t *testing.T) { config, _, _ := configWrapperFromYAML(t, defaultResulsCacheString, nil) assert.EqualValues(t, "memcached.host.org", config.QueryRange.VolumeCacheConfig.CacheConfig.MemcacheClient.Host) - assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EnableFifoCache) + assert.False(t, config.QueryRange.VolumeCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) }) } diff --git a/pkg/querier/queryrange/queryrangebase/results_cache.go b/pkg/querier/queryrange/queryrangebase/results_cache.go index dde58eb027e0f..c81193d88e28b 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache.go @@ -13,7 +13,6 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" "github.com/opentracing/opentracing-go" @@ -31,7 +30,6 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/storage/chunk/cache" - util_log "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/util/math" "github.com/grafana/loki/pkg/util/spanlogger" "github.com/grafana/loki/pkg/util/validation" @@ -80,10 +78,7 @@ type ResultsCacheConfig struct { func (cfg *ResultsCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { cfg.CacheConfig.RegisterFlagsWithPrefix(prefix, "", f) - f.StringVar(&cfg.Compression, prefix+"compression", "", "Use compression in cache. The default is an empty value '', which disables compression. Supported values are: 'snappy' and ''.") - //lint:ignore faillint Need to pass the global logger like this for warning on deprecated methods - flagext.DeprecatedFlag(f, prefix+"cache-split-interval", "Deprecated: The maximum interval expected for each request, results will be cached per single interval. This behavior is now determined by querier.split-queries-by-interval.", util_log.Logger) } // RegisterFlags registers flags. 
@@ -99,7 +94,7 @@ func (cfg *ResultsCacheConfig) Validate() error { return errors.Errorf("unsupported compression type: %s", cfg.Compression) } - return cfg.CacheConfig.Validate() + return nil } // Extractor is used by the cache to extract a subset of a response from a cache entry. diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 336c00d4865bf..e83cc6e896302 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -49,10 +49,10 @@ var ( CacheResults: true, ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ CacheConfig: cache.Config{ - EnableFifoCache: true, - Fifocache: cache.FifoCacheConfig{ - MaxSizeItems: 1024, - TTL: 24 * time.Hour, + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, }, }, }, @@ -62,10 +62,10 @@ var ( StatsCacheConfig: IndexStatsCacheConfig{ ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ CacheConfig: cache.Config{ - EnableFifoCache: true, - Fifocache: cache.FifoCacheConfig{ - MaxSizeItems: 1024, - TTL: 24 * time.Hour, + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, }, }, }, @@ -73,10 +73,10 @@ var ( VolumeCacheConfig: VolumeCacheConfig{ ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ CacheConfig: cache.Config{ - EnableFifoCache: true, - Fifocache: cache.FifoCacheConfig{ - MaxSizeItems: 1024, - TTL: 24 * time.Hour, + EmbeddedCache: cache.EmbeddedCacheConfig{ + Enabled: true, + MaxSizeMB: 1024, + TTL: 24 * time.Hour, }, }, }, diff --git a/pkg/storage/chunk/cache/cache.go b/pkg/storage/chunk/cache/cache.go index d12240c87b091..7354960d75cf6 100644 --- a/pkg/storage/chunk/cache/cache.go +++ b/pkg/storage/chunk/cache/cache.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/grafana/loki/pkg/logqlmodel/stats" @@ -26,8 +25,6 @@ type Cache interface { // Config for building Caches. type Config struct { - EnableFifoCache bool `yaml:"enable_fifocache"` - DefaultValidity time.Duration `yaml:"default_validity"` Background BackgroundConfig `yaml:"background"` @@ -35,7 +32,6 @@ type Config struct { MemcacheClient MemcachedClientConfig `yaml:"memcached_client"` Redis RedisConfig `yaml:"redis"` EmbeddedCache EmbeddedCacheConfig `yaml:"embedded_cache"` - Fifocache FifoCacheConfig `yaml:"fifocache"` // deprecated // This is to name the cache metrics properly. 
Prefix string `yaml:"prefix" doc:"hidden"` @@ -55,20 +51,14 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, description string, f cfg.Memcache.RegisterFlagsWithPrefix(prefix, description, f) cfg.MemcacheClient.RegisterFlagsWithPrefix(prefix, description, f) cfg.Redis.RegisterFlagsWithPrefix(prefix, description, f) - cfg.Fifocache.RegisterFlagsWithPrefix(prefix, description, f) cfg.EmbeddedCache.RegisterFlagsWithPrefix(prefix, description, f) f.IntVar(&cfg.AsyncCacheWriteBackConcurrency, prefix+"max-async-cache-write-back-concurrency", 16, "The maximum number of concurrent asynchronous writeback cache can occur.") f.IntVar(&cfg.AsyncCacheWriteBackBufferSize, prefix+"max-async-cache-write-back-buffer-size", 500, "The maximum number of enqueued asynchronous writeback cache allowed.") f.DurationVar(&cfg.DefaultValidity, prefix+"default-validity", time.Hour, description+"The default validity of entries for caches unless overridden.") - f.BoolVar(&cfg.EnableFifoCache, prefix+"cache.enable-fifocache", false, description+"(deprecated: use embedded-cache instead) Enable in-memory cache (auto-enabled for the chunks & query results cache if no other cache is configured).") cfg.Prefix = prefix } -func (cfg *Config) Validate() error { - return cfg.Fifocache.Validate() -} - // IsMemcacheSet returns whether a non empty Memcache config is set or not, based on the configured // host or addresses. // @@ -88,10 +78,6 @@ func IsEmbeddedCacheSet(cfg Config) bool { return cfg.EmbeddedCache.Enabled } -func IsFifoCacheSet(cfg Config) bool { - return cfg.EnableFifoCache -} - func IsSpecificImplementationSet(cfg Config) bool { return cfg.Cache != nil } @@ -100,10 +86,9 @@ func IsSpecificImplementationSet(cfg Config) bool { // - memcached // - redis // - embedded-cache -// - fifo-cache // - specific cache implementation func IsCacheConfigured(cfg Config) bool { - return IsMemcacheSet(cfg) || IsRedisSet(cfg) || IsEmbeddedCacheSet(cfg) || IsFifoCacheSet(cfg) || IsSpecificImplementationSet(cfg) + return IsMemcacheSet(cfg) || IsRedisSet(cfg) || IsEmbeddedCacheSet(cfg) || IsSpecificImplementationSet(cfg) } // New creates a new Cache using Config. @@ -116,24 +101,11 @@ func New(cfg Config, reg prometheus.Registerer, logger log.Logger, cacheType sta } var caches []Cache - - // Currently fifocache can be enabled in two ways. - // 1. cfg.EnableFifocache (old deprecated way) - // 2. cfg.EmbeddedCache.Enabled=true and cfg.EmbeddedCache.Distributed=false (new way) - if cfg.EnableFifoCache || cfg.EmbeddedCache.IsEnabled() { - var fifocfg FifoCacheConfig - - if cfg.EnableFifoCache { - level.Warn(logger).Log("msg", "fifocache config is deprecated. 
use embedded-cache instead") - fifocfg = cfg.Fifocache - } - - if cfg.EmbeddedCache.IsEnabled() { - fifocfg = FifoCacheConfig{ - MaxSizeBytes: fmt.Sprint(cfg.EmbeddedCache.MaxSizeMB * 1e6), - TTL: cfg.EmbeddedCache.TTL, - PurgeInterval: cfg.EmbeddedCache.PurgeInterval, - } + if cfg.EmbeddedCache.IsEnabled() { + fifocfg := FifoCacheConfig{ + MaxSizeBytes: fmt.Sprint(cfg.EmbeddedCache.MaxSizeMB * 1e6), + TTL: cfg.EmbeddedCache.TTL, + PurgeInterval: cfg.EmbeddedCache.PurgeInterval, } if fifocfg.TTL == 0 && cfg.DefaultValidity != 0 { diff --git a/pkg/storage/config/store.go b/pkg/storage/config/store.go index ce9c2aaf73567..d3872aee02ab2 100644 --- a/pkg/storage/config/store.go +++ b/pkg/storage/config/store.go @@ -55,8 +55,6 @@ func (cfg *ChunkStoreConfig) Validate(logger log.Logger) error { flagext.DeprecatedFlagsUsed.Inc() level.Warn(logger).Log("msg", "running with DEPRECATED flag -store.max-look-back-period, use -querier.max-query-lookback instead.") } - if err := cfg.ChunkCacheConfig.Validate(); err != nil { - return err - } - return cfg.WriteDedupeCacheConfig.Validate() + + return nil } diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 758327f7e616a..5da9063516821 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -332,9 +332,6 @@ func (cfg *Config) Validate() error { if err := cfg.Swift.Validate(); err != nil { return errors.Wrap(err, "invalid Swift Storage config") } - if err := cfg.IndexQueriesCacheConfig.Validate(); err != nil { - return errors.Wrap(err, "invalid Index Queries Cache config") - } if err := cfg.AzureStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Azure Storage config") } From a274bd5eefb34da2f8d9208de3d236ba9749b5e3 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 11:18:54 +0530 Subject: [PATCH 02/14] add changelog --- CHANGELOG.md | 1 + docs/sources/setup/upgrade/_index.md | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c619ab5fc04de..4c6341c9a3cd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ * [10395](https://github.com/grafana/loki/pull/10395/) **shantanualshi** Remove deprecated `split_queries_by_interval` and `forward_headers_list` configuration options in the `query_range` section * [10456](https://github.com/grafana/loki/pull/10456) **dannykopping** Add `loki_distributor_ingester_append_timeouts_total` metric, remove `loki_distributor_ingester_append_failures_total` metric * [10534](https://github.com/grafana/loki/pull/10534) **chaudum** Remove configuration `use_boltdb_shipper_as_backup` +* [10620](https://github.com/grafana/loki/pull/10620) **ashwanthgoli** Enable embedded cache if no other cache is explicitly enabled. ##### Fixes diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index 1f3abeae307fd..c60771bf64c11 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -54,6 +54,7 @@ The previous default value `false` is applied. 5. `experimental.ruler.enable-api` is removed. Use `ruler.enable-api` instead. 6. `split_queries_by_interval` is removed from `query_range` YAML section. You can instead configure it in [Limits Config](/docs/loki/latest/configuration/#limits_config). 7. `frontend.forward-headers-list` CLI flag and its corresponding YAML setting are removed. +8. `frontend.cache-split-interval` CLI flag is removed. Results caching interval is now determined by `querier.split-queries-by-interval`. 
#### Distributor metric changes From 76e1dd3a746170b70f36547d50871485bc1d24c7 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 11:24:58 +0530 Subject: [PATCH 03/14] disable embedded cache for migrate tool --- cmd/migrate/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go index 5eb6fc994dc90..d8b1a0dd5d466 100644 --- a/cmd/migrate/main.go +++ b/cmd/migrate/main.go @@ -76,11 +76,13 @@ func main() { // This is a little brittle, if we add a new cache it may easily get missed here but it's important to disable // any of the chunk caches to save on memory because we write chunks to the cache when we call Put operations on the store. + sourceConfig.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled = false sourceConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient sourceConfig.ChunkStoreConfig.ChunkCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.Redis sourceConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient sourceConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.Redis + destConfig.ChunkStoreConfig.ChunkCacheConfig.EmbeddedCache.Enabled = false destConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.MemcacheClient destConfig.ChunkStoreConfig.ChunkCacheConfig.Redis = defaultsConfig.ChunkStoreConfig.ChunkCacheConfig.Redis destConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient = defaultsConfig.ChunkStoreConfig.WriteDedupeCacheConfig.MemcacheClient From 81a074f48f0aaf8bfcabfee9cb467db79859aa58 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 11:37:22 +0530 Subject: [PATCH 04/14] fix Test_ApplyDynamicConfig --- pkg/loki/config_wrapper_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index 5f0a534695f67..7fd58d068044e 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -839,10 +839,6 @@ ingester: }) t.Run("embedded-cache setting is applied to result caches", func(t *testing.T) { - // ensure they are all false by default - config, _, _ := configWrapperFromYAML(t, minimalConfig, nil) - assert.False(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) - configFileString := `--- query_range: results_cache: @@ -850,8 +846,7 @@ query_range: embedded_cache: enabled: true` - config, _ = testContext(configFileString, nil) - + config, _ := testContext(configFileString, nil) assert.True(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) } From 339a134f33df911747acd4b207f6b21f20a3d4c2 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 12:11:28 +0530 Subject: [PATCH 05/14] fix TestQueryTSDB_WithCachedPostings --- integration/loki_micro_services_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index a5cb839d0ea3d..00df38279ec62 100644 --- a/integration/loki_micro_services_test.go +++ b/integration/loki_micro_services_test.go @@ -588,7 +588,7 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) { "index-gateway", "-target=index-gateway", "-tsdb.enable-postings-cache=true", - "-store.index-cache-read.cache.enable-fifocache=true", + 
"-store.index-cache-read.embedded-cache.enabled=true", ) ) require.NoError(t, clu.Run()) From ddd8866a919f2f45fe8cd70f2d97dacb6ce1b017 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 12:19:37 +0530 Subject: [PATCH 06/14] make doc --- docs/sources/configure/_index.md | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index eef02ae112f3b..2c448791dbcd4 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -3929,11 +3929,6 @@ The cache block configures the cache backend. The supported CLI flags ``   ```yaml -# (deprecated: use embedded-cache instead) Enable in-memory cache (auto-enabled -# for the chunks & query results cache if no other cache is configured). -# CLI flag: -.cache.enable-fifocache -[enable_fifocache: | default = false] - # The default validity of entries for caches unless overridden. # CLI flag: -.default-validity [default_validity: | default = 1h] @@ -4088,31 +4083,6 @@ embedded_cache: # CLI flag: -.embedded-cache.ttl [ttl: | default = 1h] -fifocache: - # Maximum memory size of the cache in bytes. A unit suffix (KB, MB, GB) may be - # applied. - # CLI flag: -.fifocache.max-size-bytes - [max_size_bytes: | default = "1GB"] - - # deprecated: Maximum number of entries in the cache. - # CLI flag: -.fifocache.max-size-items - [max_size_items: | default = 0] - - # The time to live for items in the cache before they get purged. - # CLI flag: -.fifocache.ttl - [ttl: | default = 1h] - - # Deprecated (use ttl instead): The expiry duration for the cache. - # CLI flag: -.fifocache.duration - [validity: | default = 0s] - - # Deprecated (use max-size-items or max-size-bytes instead): The number of - # entries to cache. - # CLI flag: -.fifocache.size - [size: | default = 0] - - [purgeinterval: ] - # The maximum number of concurrent asynchronous writeback cache can occur. # CLI flag: -.max-async-cache-write-back-concurrency [async_cache_write_back_concurrency: | default = 16] From bdb20e67469dc8348484e1a5efe86edf20c1207a Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 12:33:53 +0530 Subject: [PATCH 07/14] add a note about the change of results cache size --- docs/sources/setup/upgrade/_index.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index c60771bf64c11..da4c1d7b973db 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -61,6 +61,9 @@ The previous default value `false` is applied. The `loki_distributor_ingester_append_failures_total` metric has been removed in favour of `loki_distributor_ingester_append_timeouts_total`. This new metric will provide a more clear signal that there is an issue with ingesters, and this metric can be used for high-signal alerting. +#### Changes to default configuration values + +1. `frontend.embedded-cache.max-size-mb` Embedded results cache size is now limited to 100MB. Increase this value according to your log volume. ## 2.9.0 From 6f915132924bb00a6f148e552bd0213209b9fcbd Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Mon, 18 Sep 2023 12:51:13 +0530 Subject: [PATCH 08/14] fixup! 
add a note about the change of results cache size --- docs/sources/setup/upgrade/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index da4c1d7b973db..b0fc484558c32 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -63,7 +63,7 @@ This new metric will provide a more clear signal that there is an issue with ing #### Changes to default configuration values -1. `frontend.embedded-cache.max-size-mb` Embedded results cache size is now limited to 100MB. Increase this value according to your log volume. +1. `frontend.embedded-cache.max-size-mb` Embedded results cache size now defaults to 100MB. ## 2.9.0 From 80ebaa7da941c7a29ba5b04a56ff831513d9de65 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Fri, 22 Sep 2023 12:57:59 +0530 Subject: [PATCH 09/14] replace MaxSizeBytes with MaxSizeMB --- pkg/storage/chunk/cache/cache.go | 2 +- pkg/storage/chunk/cache/fifo_cache.go | 35 +----- pkg/storage/chunk/cache/fifo_cache_test.go | 119 ++++++++---------- .../stores/tsdb/cached_postings_index_test.go | 10 +- 4 files changed, 61 insertions(+), 105 deletions(-) diff --git a/pkg/storage/chunk/cache/cache.go b/pkg/storage/chunk/cache/cache.go index 7354960d75cf6..6af7a1b22a0ae 100644 --- a/pkg/storage/chunk/cache/cache.go +++ b/pkg/storage/chunk/cache/cache.go @@ -103,7 +103,7 @@ func New(cfg Config, reg prometheus.Registerer, logger log.Logger, cacheType sta var caches []Cache if cfg.EmbeddedCache.IsEnabled() { fifocfg := FifoCacheConfig{ - MaxSizeBytes: fmt.Sprint(cfg.EmbeddedCache.MaxSizeMB * 1e6), + MaxSizeMB: cfg.EmbeddedCache.MaxSizeMB, TTL: cfg.EmbeddedCache.TTL, PurgeInterval: cfg.EmbeddedCache.PurgeInterval, } diff --git a/pkg/storage/chunk/cache/fifo_cache.go b/pkg/storage/chunk/cache/fifo_cache.go index c131857ab8d83..57f5df589b0c5 100644 --- a/pkg/storage/chunk/cache/fifo_cache.go +++ b/pkg/storage/chunk/cache/fifo_cache.go @@ -3,7 +3,6 @@ package cache import ( "container/list" "context" - "flag" "fmt" "sync" "time" @@ -12,7 +11,6 @@ import ( "github.com/dustin/go-humanize" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/dskit/flagext" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -33,31 +31,13 @@ const ( // FifoCacheConfig holds config for the FifoCache. type FifoCacheConfig struct { - MaxSizeBytes string `yaml:"max_size_bytes"` + MaxSizeMB int64 `yaml:"max_size_mb"` MaxSizeItems int `yaml:"max_size_items"` // deprecated TTL time.Duration `yaml:"ttl"` - DeprecatedValidity time.Duration `yaml:"validity"` - DeprecatedSize int `yaml:"size"` - PurgeInterval time.Duration } -// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet -func (cfg *FifoCacheConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { - f.StringVar(&cfg.MaxSizeBytes, prefix+"fifocache.max-size-bytes", "1GB", description+"Maximum memory size of the cache in bytes. 
A unit suffix (KB, MB, GB) may be applied.") - f.IntVar(&cfg.MaxSizeItems, prefix+"fifocache.max-size-items", 0, description+"deprecated: Maximum number of entries in the cache.") - f.DurationVar(&cfg.TTL, prefix+"fifocache.ttl", time.Hour, description+"The time to live for items in the cache before they get purged.") - - f.DurationVar(&cfg.DeprecatedValidity, prefix+"fifocache.duration", 0, "Deprecated (use ttl instead): "+description+"The expiry duration for the cache.") - f.IntVar(&cfg.DeprecatedSize, prefix+"fifocache.size", 0, "Deprecated (use max-size-items or max-size-bytes instead): "+description+"The number of entries to cache.") -} - -func (cfg *FifoCacheConfig) Validate() error { - _, err := parsebytes(cfg.MaxSizeBytes) - return err -} - func parsebytes(s string) (uint64, error) { if len(s) == 0 { return 0, nil @@ -110,12 +90,7 @@ type cacheEntry struct { func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, logger log.Logger, cacheType stats.CacheType) *FifoCache { util_log.WarnExperimentalUse(fmt.Sprintf("In-memory (FIFO) cache - %s", name), logger) - if cfg.DeprecatedSize > 0 { - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name) - cfg.MaxSizeItems = cfg.DeprecatedSize - } - maxSizeBytes, _ := parsebytes(cfg.MaxSizeBytes) + maxSizeBytes := uint64(cfg.MaxSizeMB * 1e6) if maxSizeBytes == 0 && cfg.MaxSizeItems == 0 { // zero cache capacity - no need to create cache @@ -123,12 +98,6 @@ func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, l return nil } - if cfg.DeprecatedValidity > 0 { - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(logger).Log("msg", "running with DEPRECATED flag fifocache.duration, use fifocache.ttl instead", "cache", name) - cfg.TTL = cfg.DeprecatedValidity - } - // Set a default interval for the ticker // This can be overwritten to a smaller value in tests if cfg.PurgeInterval == 0 { diff --git a/pkg/storage/chunk/cache/fifo_cache_test.go b/pkg/storage/chunk/cache/fifo_cache_test.go index 6bc7153b64bf4..8838b5004f58c 100644 --- a/pkg/storage/chunk/cache/fifo_cache_test.go +++ b/pkg/storage/chunk/cache/fifo_cache_test.go @@ -3,7 +3,6 @@ package cache import ( "context" "fmt" - "strconv" "testing" "time" @@ -18,9 +17,17 @@ func TestFifoCacheEviction(t *testing.T) { cnt = 10 evicted = 5 ) + + // compute value size such that 10 entries account to exactly 1MB. + // adding one more entry to the cache would result in eviction when MaxSizeMB is configured to a value of 1. + // value cap = target size of each entry (0.1MB) - size of cache entry with empty value. 
+ valueCap := (1e6 / cnt) - sizeOf(&cacheEntry{ + key: "00", + }) + itemTemplate := &cacheEntry{ key: "00", - value: []byte("00"), + value: make([]byte, 0, valueCap), } tests := []struct { @@ -29,7 +36,7 @@ func TestFifoCacheEviction(t *testing.T) { }{ { name: "test-memory-eviction", - cfg: FifoCacheConfig{MaxSizeBytes: strconv.FormatInt(int64(cnt*sizeOf(itemTemplate)), 10), TTL: 1 * time.Minute}, + cfg: FifoCacheConfig{MaxSizeMB: 1, TTL: 1 * time.Minute}, }, { name: "test-items-eviction", @@ -46,7 +53,7 @@ func TestFifoCacheEviction(t *testing.T) { values := [][]byte{} for i := 0; i < cnt; i++ { key := fmt.Sprintf("%02d", i) - value := make([]byte, len(key)) + value := make([]byte, len(key), valueCap) copy(value, key) keys = append(keys, key) values = append(values, value) @@ -89,7 +96,7 @@ func TestFifoCacheEviction(t *testing.T) { values = [][]byte{} for i := cnt - evicted; i < cnt+evicted; i++ { key := fmt.Sprintf("%02d", i) - value := make([]byte, len(key)) + value := make([]byte, len(key), valueCap) copy(value, key) keys = append(keys, key) values = append(values, value) @@ -135,7 +142,7 @@ func TestFifoCacheEviction(t *testing.T) { for i := cnt; i < cnt+evicted; i++ { keys = append(keys, fmt.Sprintf("%02d", i)) vstr := fmt.Sprintf("%02d", i*2) - value := make([]byte, len(vstr)) + value := make([]byte, len(vstr), valueCap) copy(value, vstr) values = append(values, value) } @@ -171,73 +178,53 @@ func TestFifoCacheExpiry(t *testing.T) { sizeOf(&cacheEntry{key: key2, value: data2}) + sizeOf(&cacheEntry{key: key3, value: data3}) - tests := []struct { - name string - cfg FifoCacheConfig - }{ - { - name: "test-memory-expiry", - cfg: FifoCacheConfig{ - MaxSizeBytes: strconv.FormatInt(int64(memorySz), 10), - TTL: 250 * time.Millisecond, - PurgeInterval: 100 * time.Millisecond, - }, - }, - { - name: "test-items-expiry", - cfg: FifoCacheConfig{ - MaxSizeItems: 3, - TTL: 100 * time.Millisecond, - PurgeInterval: 50 * time.Millisecond, - }, - }, + cfg := FifoCacheConfig{ + MaxSizeItems: 3, + TTL: 100 * time.Millisecond, + PurgeInterval: 50 * time.Millisecond, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c := NewFifoCache(test.name, test.cfg, nil, log.NewNopLogger(), "test") - ctx := context.Background() + c := NewFifoCache("cache_exprity_test", cfg, nil, log.NewNopLogger(), "test") + ctx := context.Background() - err := c.Store(ctx, []string{key1, key2, key3, key4}, [][]byte{data1, data2, data3, data4}) - require.NoError(t, err) + err := c.Store(ctx, []string{key1, key2, key3, key4}, [][]byte{data1, data2, data3, data4}) + require.NoError(t, err) - value, ok := c.Get(ctx, key4) - require.True(t, ok) - require.Equal(t, data1, value) + value, ok := c.Get(ctx, key4) + require.True(t, ok) + require.Equal(t, data4, value) - _, ok = c.Get(ctx, key1) - require.False(t, ok) + _, ok = c.Get(ctx, key1) + require.False(t, ok) - assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesAdded)) - assert.Equal(t, float64(4), testutil.ToFloat64(c.entriesAddedNew)) - assert.Equal(t, float64(0), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(expiredReason))) - assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(fullReason))) - assert.Equal(t, float64(3), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(len(c.entries)), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(c.lru.Len()), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(2), testutil.ToFloat64(c.totalGets)) - assert.Equal(t, float64(1), 
testutil.ToFloat64(c.totalMisses)) - assert.Equal(t, float64(memorySz), testutil.ToFloat64(c.memoryBytes)) - - // Expire the item. - time.Sleep(2 * test.cfg.TTL) - _, ok = c.Get(ctx, key4) - require.False(t, ok) + assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesAdded)) + assert.Equal(t, float64(4), testutil.ToFloat64(c.entriesAddedNew)) + assert.Equal(t, float64(0), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(expiredReason))) + assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(fullReason))) + assert.Equal(t, float64(3), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(len(c.entries)), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(c.lru.Len()), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(2), testutil.ToFloat64(c.totalGets)) + assert.Equal(t, float64(1), testutil.ToFloat64(c.totalMisses)) + assert.Equal(t, float64(memorySz), testutil.ToFloat64(c.memoryBytes)) - assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesAdded)) - assert.Equal(t, float64(4), testutil.ToFloat64(c.entriesAddedNew)) - assert.Equal(t, float64(3), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(expiredReason))) - assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(fullReason))) - assert.Equal(t, float64(0), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(len(c.entries)), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(c.lru.Len()), testutil.ToFloat64(c.entriesCurrent)) - assert.Equal(t, float64(3), testutil.ToFloat64(c.totalGets)) - assert.Equal(t, float64(2), testutil.ToFloat64(c.totalMisses)) - assert.Equal(t, float64(memorySz), testutil.ToFloat64(c.memoryBytes)) - - c.Stop() - }) - } + // Expire the item. + time.Sleep(2 * cfg.TTL) + _, ok = c.Get(ctx, key4) + require.False(t, ok) + + assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesAdded)) + assert.Equal(t, float64(4), testutil.ToFloat64(c.entriesAddedNew)) + assert.Equal(t, float64(3), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(expiredReason))) + assert.Equal(t, float64(1), testutil.ToFloat64(c.entriesEvicted.WithLabelValues(fullReason))) + assert.Equal(t, float64(0), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(len(c.entries)), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(c.lru.Len()), testutil.ToFloat64(c.entriesCurrent)) + assert.Equal(t, float64(3), testutil.ToFloat64(c.totalGets)) + assert.Equal(t, float64(2), testutil.ToFloat64(c.totalMisses)) + assert.Equal(t, float64(memorySz), testutil.ToFloat64(c.memoryBytes)) + + c.Stop() } func genBytes(n uint8) []byte { diff --git a/pkg/storage/stores/tsdb/cached_postings_index_test.go b/pkg/storage/stores/tsdb/cached_postings_index_test.go index 18ab43deccdef..7409ec9e53fcb 100644 --- a/pkg/storage/stores/tsdb/cached_postings_index_test.go +++ b/pkg/storage/stores/tsdb/cached_postings_index_test.go @@ -19,7 +19,7 @@ import ( func TestSingleIdxCached(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeBytes: "1MB"} + cfg := cache.FifoCacheConfig{MaxSizeMB: 1} c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() @@ -217,7 +217,7 @@ func TestSingleIdxCached(t *testing.T) { func BenchmarkCacheableTSDBIndex_GetChunkRefs(b *testing.B) { // setup cache. 
- cfg := cache.FifoCacheConfig{MaxSizeBytes: "1MB"} + cfg := cache.FifoCacheConfig{MaxSizeMB: 1} c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() @@ -273,7 +273,7 @@ func BenchmarkCacheableTSDBIndex_GetChunkRefs(b *testing.B) { func TestCacheableTSDBIndex_Stats(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeBytes: "1MB"} + cfg := cache.FifoCacheConfig{MaxSizeMB: 1} c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() @@ -388,7 +388,7 @@ func TestCacheableTSDBIndex_Stats(t *testing.T) { func BenchmarkSeriesRepetitive(b *testing.B) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeBytes: "1MB"} + cfg := cache.FifoCacheConfig{MaxSizeMB: 1} c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() @@ -443,7 +443,7 @@ func BenchmarkSeriesRepetitive(b *testing.B) { func TestMultipleIndexesFiles(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeBytes: "1MB"} + cfg := cache.FifoCacheConfig{MaxSizeMB: 1} c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() From 0f759327ba5e36de19495824f3c35a43620d0e92 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Fri, 22 Sep 2023 14:49:14 +0530 Subject: [PATCH 10/14] rename fifocache to embedded cache --- pkg/querier/queryrange/roundtrip_test.go | 6 +- pkg/storage/chunk/cache/cache.go | 12 +- pkg/storage/chunk/cache/cache_test.go | 2 +- pkg/storage/chunk/cache/embeddedcache.go | 318 +++++++++++++++- ...fo_cache_test.go => embeddedcache_test.go} | 50 +-- pkg/storage/chunk/cache/fifo_cache.go | 342 ------------------ .../chunk/tests/caching_fixtures_test.go | 2 +- .../series/index/caching_index_client_test.go | 12 +- .../stores/series/series_store_test.go | 4 +- .../stores/tsdb/cached_postings_index_test.go | 20 +- 10 files changed, 344 insertions(+), 424 deletions(-) rename pkg/storage/chunk/cache/{fifo_cache_test.go => embeddedcache_test.go} (84%) delete mode 100644 pkg/storage/chunk/cache/fifo_cache.go diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index e83cc6e896302..d54b5965ebc32 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -757,7 +757,8 @@ func TestNewTripperware_Caches(t *testing.T) { ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, + MaxSizeMB: 1, + Enabled: true, }, }, }, @@ -775,7 +776,8 @@ func TestNewTripperware_Caches(t *testing.T) { ResultsCacheConfig: queryrangebase.ResultsCacheConfig{ CacheConfig: cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ - Enabled: true, + MaxSizeMB: 1, + Enabled: true, }, }, }, diff --git a/pkg/storage/chunk/cache/cache.go b/pkg/storage/chunk/cache/cache.go index 6af7a1b22a0ae..edea291b4c5df 100644 --- a/pkg/storage/chunk/cache/cache.go +++ b/pkg/storage/chunk/cache/cache.go @@ -102,17 +102,11 @@ func New(cfg Config, reg prometheus.Registerer, logger log.Logger, cacheType sta var caches []Cache if cfg.EmbeddedCache.IsEnabled() { - fifocfg := FifoCacheConfig{ - MaxSizeMB: cfg.EmbeddedCache.MaxSizeMB, - TTL: cfg.EmbeddedCache.TTL, - PurgeInterval: cfg.EmbeddedCache.PurgeInterval, + if cfg.EmbeddedCache.TTL == 0 && cfg.DefaultValidity != 0 { + cfg.EmbeddedCache.TTL = cfg.DefaultValidity } - if fifocfg.TTL == 0 && cfg.DefaultValidity != 0 { - fifocfg.TTL = cfg.DefaultValidity - } - - if cache := 
NewFifoCache(cfg.Prefix+"embedded-cache", fifocfg, reg, logger, cacheType); cache != nil { + if cache := NewEmbeddedCache(cfg.Prefix+"embedded-cache", cfg.EmbeddedCache, reg, logger, cacheType); cache != nil { caches = append(caches, CollectStats(Instrument(cfg.Prefix+"embedded-cache", cache, reg))) } } diff --git a/pkg/storage/chunk/cache/cache_test.go b/pkg/storage/chunk/cache/cache_test.go index 9f2671ed97d05..efe33c6f94c8f 100644 --- a/pkg/storage/chunk/cache/cache_test.go +++ b/pkg/storage/chunk/cache/cache_test.go @@ -205,7 +205,7 @@ func TestMemcache(t *testing.T) { } func TestFifoCache(t *testing.T) { - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 1e3, TTL: 1 * time.Hour}, + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 1e3, TTL: 1 * time.Hour}, nil, log.NewNopLogger(), "test") testCache(t, cache) } diff --git a/pkg/storage/chunk/cache/embeddedcache.go b/pkg/storage/chunk/cache/embeddedcache.go index 59b7ce80b144d..fb9c8abc148ea 100644 --- a/pkg/storage/chunk/cache/embeddedcache.go +++ b/pkg/storage/chunk/cache/embeddedcache.go @@ -1,32 +1,338 @@ package cache import ( + "container/list" + "context" "flag" + "sync" "time" + "unsafe" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/loki/pkg/logqlmodel/stats" ) const ( - DefaultPurgeInterval = 1 * time.Minute + elementSize = int(unsafe.Sizeof(list.Element{})) + elementPrtSize = int(unsafe.Sizeof(&list.Element{})) + + defaultPurgeInterval = 1 * time.Minute + + expiredReason string = "expired" //nolint:staticcheck + fullReason = "full" + tooBigReason = "object too big" ) +// EmbeddedCache is a simple string -> interface{} cache which uses a fifo slide to +// manage evictions. O(1) inserts and updates, O(1) gets. +// +// This embedded cache implementation supports two eviction methods - based on number of items in the cache, and based on memory usage. +// For the memory-based eviction, set EmbeddedCacheConfig.MaxSizeMB to a positive integer, indicating upper limit of memory allocated by items in the cache. +// Alternatively, set EmbeddedCacheConfig.MaxSizeItems to a positive integer, indicating maximum number of items in the cache. +// If both parameters are set, both methods are enforced, whichever hits first. +type EmbeddedCache struct { + cacheType stats.CacheType + + lock sync.RWMutex + maxSizeItems int + maxSizeBytes uint64 + currSizeBytes uint64 + + entries map[string]*list.Element + lru *list.List + + done chan struct{} + + entriesAdded prometheus.Counter + entriesAddedNew prometheus.Counter + entriesEvicted *prometheus.CounterVec + entriesCurrent prometheus.Gauge + totalGets prometheus.Counter + totalMisses prometheus.Counter + staleGets prometheus.Counter + memoryBytes prometheus.Gauge +} + +type cacheEntry struct { + updated time.Time + key string + value []byte +} + // EmbeddedCacheConfig represents in-process embedded cache config. type EmbeddedCacheConfig struct { - Enabled bool `yaml:"enabled,omitempty"` - MaxSizeMB int64 `yaml:"max_size_mb"` - TTL time.Duration `yaml:"ttl"` + Enabled bool `yaml:"enabled,omitempty"` + MaxSizeMB int64 `yaml:"max_size_mb"` + MaxSizeItems int `yaml:"max_size_items"` + TTL time.Duration `yaml:"ttl"` // PurgeInterval tell how often should we remove keys that are expired. 
- // by default it takes `DefaultPurgeInterval` + // by default it takes `defaultPurgeInterval` PurgeInterval time.Duration `yaml:"-"` } func (cfg *EmbeddedCacheConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { f.BoolVar(&cfg.Enabled, prefix+"embedded-cache.enabled", false, description+"Whether embedded cache is enabled.") f.Int64Var(&cfg.MaxSizeMB, prefix+"embedded-cache.max-size-mb", 100, description+"Maximum memory size of the cache in MB.") + f.IntVar(&cfg.MaxSizeItems, prefix+"embedded-cache.max-size-items", 0, description+"Maximum number of entries in the cache.") f.DurationVar(&cfg.TTL, prefix+"embedded-cache.ttl", time.Hour, description+"The time to live for items in the cache before they get purged.") - } func (cfg *EmbeddedCacheConfig) IsEnabled() bool { return cfg.Enabled } + +// NewEmbeddedCache returns a new initialised EmbeddedCache. +func NewEmbeddedCache(name string, cfg EmbeddedCacheConfig, reg prometheus.Registerer, logger log.Logger, cacheType stats.CacheType) *EmbeddedCache { + if cfg.MaxSizeMB == 0 && cfg.MaxSizeItems == 0 { + // zero cache capacity - no need to create cache + level.Warn(logger).Log("msg", "neither embedded-cache.max-size-mb nor embedded-cache.max-size-items is set", "cache", name) + return nil + } + + // Set a default interval for the ticker + // This can be overwritten to a smaller value in tests + if cfg.PurgeInterval == 0 { + cfg.PurgeInterval = defaultPurgeInterval + } + + cache := &EmbeddedCache{ + cacheType: cacheType, + + maxSizeItems: cfg.MaxSizeItems, + maxSizeBytes: uint64(cfg.MaxSizeMB * 1e6), + entries: make(map[string]*list.Element), + lru: list.New(), + + done: make(chan struct{}), + + entriesAdded: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "added_total", + Help: "The total number of Put calls on the cache", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + entriesAddedNew: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "added_new_total", + Help: "The total number of new entries added to the cache", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + entriesEvicted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "evicted_total", + Help: "The total number of evicted entries", + ConstLabels: prometheus.Labels{"cache": name}, + }, []string{"reason"}), + + entriesCurrent: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "entries", + Help: "The total number of entries", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + totalGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "gets_total", + Help: "The total number of Get calls", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + totalMisses: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "misses_total", + Help: "The total number of Get calls that had no valid entry", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + staleGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "stale_gets_total", + Help: "The total number of Get calls that had an entry which expired (deprecated)", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + memoryBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + 
Namespace: "querier", + Subsystem: "cache", + Name: "memory_bytes", + Help: "The current cache size in bytes", + ConstLabels: prometheus.Labels{"cache": name}, + }), + } + + if cfg.TTL > 0 { + go cache.runPruneJob(cfg.PurgeInterval, cfg.TTL) + } + + return cache +} + +func (c *EmbeddedCache) runPruneJob(interval, ttl time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-c.done: + return + case <-ticker.C: + c.pruneExpiredItems(ttl) + } + } +} + +// pruneExpiredItems prunes items in the cache that exceeded their ttl +func (c *EmbeddedCache) pruneExpiredItems(ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + + for k, v := range c.entries { + entry := v.Value.(*cacheEntry) + if time.Since(entry.updated) > ttl { + _ = c.lru.Remove(v).(*cacheEntry) + delete(c.entries, k) + c.currSizeBytes -= sizeOf(entry) + c.entriesCurrent.Dec() + c.entriesEvicted.WithLabelValues(expiredReason).Inc() + } + } +} + +// Fetch implements Cache. +func (c *EmbeddedCache) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missing []string, err error) { + found, missing, bufs = make([]string, 0, len(keys)), make([]string, 0, len(keys)), make([][]byte, 0, len(keys)) + for _, key := range keys { + val, ok := c.Get(ctx, key) + if !ok { + missing = append(missing, key) + continue + } + + found = append(found, key) + bufs = append(bufs, val) + } + return +} + +// Store implements Cache. +func (c *EmbeddedCache) Store(_ context.Context, keys []string, values [][]byte) error { + c.entriesAdded.Inc() + + c.lock.Lock() + defer c.lock.Unlock() + + for i := range keys { + c.put(keys[i], values[i]) + } + return nil +} + +// Stop implements Cache. +func (c *EmbeddedCache) Stop() { + c.lock.Lock() + defer c.lock.Unlock() + + close(c.done) + + c.entries = make(map[string]*list.Element) + c.lru.Init() + c.currSizeBytes = 0 + + c.entriesCurrent.Set(float64(0)) + c.memoryBytes.Set(float64(0)) +} + +func (c *EmbeddedCache) GetCacheType() stats.CacheType { + return c.cacheType +} + +func (c *EmbeddedCache) put(key string, value []byte) { + // See if we already have the item in the cache. + element, ok := c.entries[key] + if ok { + // Remove the item from the cache. + entry := c.lru.Remove(element).(*cacheEntry) + delete(c.entries, key) + c.currSizeBytes -= sizeOf(entry) + c.entriesCurrent.Dec() + } + + entry := &cacheEntry{ + updated: time.Now(), + key: key, + value: value, + } + entrySz := sizeOf(entry) + + if c.maxSizeBytes > 0 && entrySz > c.maxSizeBytes { + // Cannot keep this item in the cache. + if ok { + // We do not replace this item. + c.entriesEvicted.WithLabelValues(tooBigReason).Inc() + } + c.memoryBytes.Set(float64(c.currSizeBytes)) + return + } + + // Otherwise, see if we need to evict item(s). + for (c.maxSizeBytes > 0 && c.currSizeBytes+entrySz > c.maxSizeBytes) || (c.maxSizeItems > 0 && len(c.entries) >= c.maxSizeItems) { + lastElement := c.lru.Back() + if lastElement == nil { + break + } + evicted := c.lru.Remove(lastElement).(*cacheEntry) + delete(c.entries, evicted.key) + c.currSizeBytes -= sizeOf(evicted) + c.entriesCurrent.Dec() + c.entriesEvicted.WithLabelValues(fullReason).Inc() + } + + // Finally, we have space to add the item. + c.entries[key] = c.lru.PushFront(entry) + c.currSizeBytes += entrySz + if !ok { + c.entriesAddedNew.Inc() + } + c.entriesCurrent.Inc() + c.memoryBytes.Set(float64(c.currSizeBytes)) +} + +// Get returns the stored value against the key and when the key was last updated. 
+func (c *EmbeddedCache) Get(_ context.Context, key string) ([]byte, bool) { + c.totalGets.Inc() + + c.lock.RLock() + defer c.lock.RUnlock() + + element, ok := c.entries[key] + if ok { + entry := element.Value.(*cacheEntry) + return entry.value, true + } + + c.totalMisses.Inc() + return nil, false +} + +func sizeOf(item *cacheEntry) uint64 { + return uint64(int(unsafe.Sizeof(*item)) + // size of cacheEntry + len(item.key) + // size of key + cap(item.value) + // size of value + elementSize + // size of the element in linked list + elementPrtSize) // size of the pointer to an element in the map +} diff --git a/pkg/storage/chunk/cache/fifo_cache_test.go b/pkg/storage/chunk/cache/embeddedcache_test.go similarity index 84% rename from pkg/storage/chunk/cache/fifo_cache_test.go rename to pkg/storage/chunk/cache/embeddedcache_test.go index 8838b5004f58c..1b4a6d5406b45 100644 --- a/pkg/storage/chunk/cache/fifo_cache_test.go +++ b/pkg/storage/chunk/cache/embeddedcache_test.go @@ -32,20 +32,20 @@ func TestFifoCacheEviction(t *testing.T) { tests := []struct { name string - cfg FifoCacheConfig + cfg EmbeddedCacheConfig }{ { name: "test-memory-eviction", - cfg: FifoCacheConfig{MaxSizeMB: 1, TTL: 1 * time.Minute}, + cfg: EmbeddedCacheConfig{MaxSizeMB: 1, TTL: 1 * time.Minute}, }, { name: "test-items-eviction", - cfg: FifoCacheConfig{MaxSizeItems: cnt, TTL: 1 * time.Minute}, + cfg: EmbeddedCacheConfig{MaxSizeItems: cnt, TTL: 1 * time.Minute}, }, } for _, test := range tests { - c := NewFifoCache(test.name, test.cfg, nil, log.NewNopLogger(), "test") + c := NewEmbeddedCache(test.name, test.cfg, nil, log.NewNopLogger(), "test") ctx := context.Background() // Check put / get works @@ -178,13 +178,13 @@ func TestFifoCacheExpiry(t *testing.T) { sizeOf(&cacheEntry{key: key2, value: data2}) + sizeOf(&cacheEntry{key: key3, value: data3}) - cfg := FifoCacheConfig{ + cfg := EmbeddedCacheConfig{ MaxSizeItems: 3, TTL: 100 * time.Millisecond, PurgeInterval: 50 * time.Millisecond, } - c := NewFifoCache("cache_exprity_test", cfg, nil, log.NewNopLogger(), "test") + c := NewEmbeddedCache("cache_exprity_test", cfg, nil, log.NewNopLogger(), "test") ctx := context.Background() err := c.Store(ctx, []string{key1, key2, key3, key4}, [][]byte{data1, data2, data3, data4}) @@ -234,41 +234,3 @@ func genBytes(n uint8) []byte { } return arr } - -func TestBytesParsing(t *testing.T) { - tests := []struct { - input string - expected uint64 - }{ - {input: "", expected: 0}, - {input: "123", expected: 123}, - {input: "1234567890", expected: 1234567890}, - {input: "25k", expected: 25000}, - {input: "25K", expected: 25000}, - {input: "25kb", expected: 25000}, - {input: "25kB", expected: 25000}, - {input: "25Kb", expected: 25000}, - {input: "25KB", expected: 25000}, - {input: "25kib", expected: 25600}, - {input: "25KiB", expected: 25600}, - {input: "25m", expected: 25000000}, - {input: "25M", expected: 25000000}, - {input: "25mB", expected: 25000000}, - {input: "25MB", expected: 25000000}, - {input: "2.5MB", expected: 2500000}, - {input: "25MiB", expected: 26214400}, - {input: "25mib", expected: 26214400}, - {input: "2.5mib", expected: 2621440}, - {input: "25g", expected: 25000000000}, - {input: "25G", expected: 25000000000}, - {input: "25gB", expected: 25000000000}, - {input: "25Gb", expected: 25000000000}, - {input: "25GiB", expected: 26843545600}, - {input: "25gib", expected: 26843545600}, - } - for _, test := range tests { - output, err := parsebytes(test.input) - assert.Nil(t, err) - assert.Equal(t, test.expected, output) - } -} diff 
--git a/pkg/storage/chunk/cache/fifo_cache.go b/pkg/storage/chunk/cache/fifo_cache.go deleted file mode 100644 index 57f5df589b0c5..0000000000000 --- a/pkg/storage/chunk/cache/fifo_cache.go +++ /dev/null @@ -1,342 +0,0 @@ -package cache - -import ( - "container/list" - "context" - "fmt" - "sync" - "time" - "unsafe" - - "github.com/dustin/go-humanize" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/grafana/loki/pkg/logqlmodel/stats" - util_log "github.com/grafana/loki/pkg/util/log" -) - -const ( - elementSize = int(unsafe.Sizeof(list.Element{})) - elementPrtSize = int(unsafe.Sizeof(&list.Element{})) -) - -// This FIFO cache implementation supports two eviction methods - based on number of items in the cache, and based on memory usage. -// For the memory-based eviction, set FifoCacheConfig.MaxSizeBytes to a positive integer, indicating upper limit of memory allocated by items in the cache. -// Alternatively, set FifoCacheConfig.MaxSizeItems to a positive integer, indicating maximum number of items in the cache. -// If both parameters are set, both methods are enforced, whichever hits first. - -// FifoCacheConfig holds config for the FifoCache. -type FifoCacheConfig struct { - MaxSizeMB int64 `yaml:"max_size_mb"` - MaxSizeItems int `yaml:"max_size_items"` // deprecated - TTL time.Duration `yaml:"ttl"` - - PurgeInterval time.Duration -} - -func parsebytes(s string) (uint64, error) { - if len(s) == 0 { - return 0, nil - } - bytes, err := humanize.ParseBytes(s) - if err != nil { - return 0, errors.Wrap(err, "invalid FifoCache config") - } - return bytes, nil -} - -// FifoCache is a simple string -> interface{} cache which uses a fifo slide to -// manage evictions. O(1) inserts and updates, O(1) gets. -type FifoCache struct { - cacheType stats.CacheType - - lock sync.RWMutex - maxSizeItems int - maxSizeBytes uint64 - currSizeBytes uint64 - - entries map[string]*list.Element - lru *list.List - - done chan struct{} - - entriesAdded prometheus.Counter - entriesAddedNew prometheus.Counter - entriesEvicted *prometheus.CounterVec - entriesCurrent prometheus.Gauge - totalGets prometheus.Counter - totalMisses prometheus.Counter - staleGets prometheus.Counter - memoryBytes prometheus.Gauge -} - -const ( - expiredReason string = "expired" //nolint:staticcheck - fullReason = "full" - tooBigReason = "object too big" -) - -type cacheEntry struct { - updated time.Time - key string - value []byte -} - -// NewFifoCache returns a new initialised FifoCache of size. 
-func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, logger log.Logger, cacheType stats.CacheType) *FifoCache { - util_log.WarnExperimentalUse(fmt.Sprintf("In-memory (FIFO) cache - %s", name), logger) - - maxSizeBytes := uint64(cfg.MaxSizeMB * 1e6) - - if maxSizeBytes == 0 && cfg.MaxSizeItems == 0 { - // zero cache capacity - no need to create cache - level.Warn(logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name) - return nil - } - - // Set a default interval for the ticker - // This can be overwritten to a smaller value in tests - if cfg.PurgeInterval == 0 { - cfg.PurgeInterval = 1 * time.Minute - } - - cache := &FifoCache{ - cacheType: cacheType, - - maxSizeItems: cfg.MaxSizeItems, - maxSizeBytes: maxSizeBytes, - entries: make(map[string]*list.Element), - lru: list.New(), - - done: make(chan struct{}), - - entriesAdded: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "added_total", - Help: "The total number of Put calls on the cache", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - entriesAddedNew: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "added_new_total", - Help: "The total number of new entries added to the cache", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - entriesEvicted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "evicted_total", - Help: "The total number of evicted entries", - ConstLabels: prometheus.Labels{"cache": name}, - }, []string{"reason"}), - - entriesCurrent: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "entries", - Help: "The total number of entries", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - totalGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "gets_total", - Help: "The total number of Get calls", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - totalMisses: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "misses_total", - Help: "The total number of Get calls that had no valid entry", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - staleGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "stale_gets_total", - Help: "The total number of Get calls that had an entry which expired (deprecated)", - ConstLabels: prometheus.Labels{"cache": name}, - }), - - memoryBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "memory_bytes", - Help: "The current cache size in bytes", - ConstLabels: prometheus.Labels{"cache": name}, - }), - } - - if cfg.TTL > 0 { - go cache.runPruneJob(cfg.PurgeInterval, cfg.TTL) - } - - return cache -} - -func (c *FifoCache) runPruneJob(interval, ttl time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-c.done: - return - case <-ticker.C: - c.pruneExpiredItems(ttl) - } - } -} - -// pruneExpiredItems prunes items in the cache that exceeded their ttl -func (c *FifoCache) pruneExpiredItems(ttl time.Duration) { - c.lock.Lock() - defer c.lock.Unlock() - - for k, v := range c.entries { - entry := v.Value.(*cacheEntry) - if time.Since(entry.updated) > ttl { - _ = 
c.lru.Remove(v).(*cacheEntry) - delete(c.entries, k) - c.currSizeBytes -= sizeOf(entry) - c.entriesCurrent.Dec() - c.entriesEvicted.WithLabelValues(expiredReason).Inc() - } - } -} - -// Fetch implements Cache. -func (c *FifoCache) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missing []string, err error) { - found, missing, bufs = make([]string, 0, len(keys)), make([]string, 0, len(keys)), make([][]byte, 0, len(keys)) - for _, key := range keys { - val, ok := c.Get(ctx, key) - if !ok { - missing = append(missing, key) - continue - } - - found = append(found, key) - bufs = append(bufs, val) - } - return -} - -// Store implements Cache. -func (c *FifoCache) Store(_ context.Context, keys []string, values [][]byte) error { - c.entriesAdded.Inc() - - c.lock.Lock() - defer c.lock.Unlock() - - for i := range keys { - c.put(keys[i], values[i]) - } - return nil -} - -// Stop implements Cache. -func (c *FifoCache) Stop() { - c.lock.Lock() - defer c.lock.Unlock() - - close(c.done) - - c.entries = make(map[string]*list.Element) - c.lru.Init() - c.currSizeBytes = 0 - - c.entriesCurrent.Set(float64(0)) - c.memoryBytes.Set(float64(0)) -} - -func (c *FifoCache) GetCacheType() stats.CacheType { - return c.cacheType -} - -func (c *FifoCache) put(key string, value []byte) { - // See if we already have the item in the cache. - element, ok := c.entries[key] - if ok { - // Remove the item from the cache. - entry := c.lru.Remove(element).(*cacheEntry) - delete(c.entries, key) - c.currSizeBytes -= sizeOf(entry) - c.entriesCurrent.Dec() - } - - entry := &cacheEntry{ - updated: time.Now(), - key: key, - value: value, - } - entrySz := sizeOf(entry) - - if c.maxSizeBytes > 0 && entrySz > c.maxSizeBytes { - // Cannot keep this item in the cache. - if ok { - // We do not replace this item. - c.entriesEvicted.WithLabelValues(tooBigReason).Inc() - } - c.memoryBytes.Set(float64(c.currSizeBytes)) - return - } - - // Otherwise, see if we need to evict item(s). - for (c.maxSizeBytes > 0 && c.currSizeBytes+entrySz > c.maxSizeBytes) || (c.maxSizeItems > 0 && len(c.entries) >= c.maxSizeItems) { - lastElement := c.lru.Back() - if lastElement == nil { - break - } - evicted := c.lru.Remove(lastElement).(*cacheEntry) - delete(c.entries, evicted.key) - c.currSizeBytes -= sizeOf(evicted) - c.entriesCurrent.Dec() - c.entriesEvicted.WithLabelValues(fullReason).Inc() - } - - // Finally, we have space to add the item. - c.entries[key] = c.lru.PushFront(entry) - c.currSizeBytes += entrySz - if !ok { - c.entriesAddedNew.Inc() - } - c.entriesCurrent.Inc() - c.memoryBytes.Set(float64(c.currSizeBytes)) -} - -// Get returns the stored value against the key and when the key was last updated. 
-func (c *FifoCache) Get(_ context.Context, key string) ([]byte, bool) { - c.totalGets.Inc() - - c.lock.RLock() - defer c.lock.RUnlock() - - element, ok := c.entries[key] - if ok { - entry := element.Value.(*cacheEntry) - return entry.value, true - } - - c.totalMisses.Inc() - return nil, false -} - -func sizeOf(item *cacheEntry) uint64 { - return uint64(int(unsafe.Sizeof(*item)) + // size of cacheEntry - len(item.key) + // size of key - cap(item.value) + // size of value - elementSize + // size of the element in linked list - elementPrtSize) // size of the pointer to an element in the map -} diff --git a/pkg/storage/chunk/tests/caching_fixtures_test.go b/pkg/storage/chunk/tests/caching_fixtures_test.go index 44404afe50c02..61c40ef317ec0 100644 --- a/pkg/storage/chunk/tests/caching_fixtures_test.go +++ b/pkg/storage/chunk/tests/caching_fixtures_test.go @@ -31,7 +31,7 @@ func (f fixture) Clients() (index.Client, client.Client, index.TableClient, conf indexClient, chunkClient, tableClient, schemaConfig, closer, err := f.fixture.Clients() reg := prometheus.NewRegistry() logger := log.NewNopLogger() - indexClient = index.NewCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ + indexClient = index.NewCachingIndexClient(indexClient, cache.NewEmbeddedCache("index-fifo", cache.EmbeddedCacheConfig{ MaxSizeItems: 500, TTL: 5 * time.Minute, }, reg, logger, stats.ChunkCache), 5*time.Minute, limits, logger, false) diff --git a/pkg/storage/stores/series/index/caching_index_client_test.go b/pkg/storage/stores/series/index/caching_index_client_test.go index 307304b0590a7..dcb7b90a82fa9 100644 --- a/pkg/storage/stores/series/index/caching_index_client_test.go +++ b/pkg/storage/stores/series/index/caching_index_client_test.go @@ -49,7 +49,7 @@ func TestCachingStorageClientBasic(t *testing.T) { limits, err := defaultLimits() require.NoError(t, err) logger := log.NewNopLogger() - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") client := index.NewCachingIndexClient(store, cache, 1*time.Second, limits, logger, false) queries := []index.Query{{ TableName: "table", @@ -81,7 +81,7 @@ func TestTempCachingStorageClient(t *testing.T) { limits, err := defaultLimits() require.NoError(t, err) logger := log.NewNopLogger() - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") client := index.NewCachingIndexClient(store, cache, 100*time.Millisecond, limits, logger, false) queries := []index.Query{ {TableName: "table", HashValue: "foo"}, @@ -140,7 +140,7 @@ func TestPermCachingStorageClient(t *testing.T) { limits, err := defaultLimits() require.NoError(t, err) logger := log.NewNopLogger() - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") client := index.NewCachingIndexClient(store, cache, 100*time.Millisecond, limits, logger, false) queries := []index.Query{ {TableName: "table", HashValue: "foo", Immutable: true}, @@ -196,7 +196,7 @@ func TestCachingStorageClientEmptyResponse(t *testing.T) { 
limits, err := defaultLimits() require.NoError(t, err) logger := log.NewNopLogger() - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") client := index.NewCachingIndexClient(store, cache, 1*time.Second, limits, logger, false) queries := []index.Query{{TableName: "table", HashValue: "foo"}} err = client.QueryPages(ctx, queries, func(_ index.Query, batch index.ReadBatchResult) bool { @@ -235,7 +235,7 @@ func TestCachingStorageClientCollision(t *testing.T) { limits, err := defaultLimits() require.NoError(t, err) logger := log.NewNopLogger() - cache := cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") + cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test") client := index.NewCachingIndexClient(store, cache, 1*time.Second, limits, logger, false) queries := []index.Query{ {TableName: "table", HashValue: "foo", RangeValuePrefix: []byte("bar")}, @@ -415,7 +415,7 @@ func TestCachingStorageClientStoreQueries(t *testing.T) { require.NoError(t, err) logger := log.NewNopLogger() cache := &mockCache{ - Cache: cache.NewFifoCache("test", cache.FifoCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test"), + Cache: cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 10, TTL: 10 * time.Second}, nil, logger, "test"), } client := index.NewCachingIndexClient(store, cache, 1*time.Second, limits, logger, disableBroadQueries) var callbackQueries []index.Query diff --git a/pkg/storage/stores/series/series_store_test.go b/pkg/storage/stores/series/series_store_test.go index fd68299a86223..4bb2fc531a574 100644 --- a/pkg/storage/stores/series/series_store_test.go +++ b/pkg/storage/stores/series/series_store_test.go @@ -54,9 +54,7 @@ var ( configFn: func() config.ChunkStoreConfig { var storeCfg config.ChunkStoreConfig flagext.DefaultValues(&storeCfg) - storeCfg.WriteDedupeCacheConfig.Cache = cache.NewFifoCache("test", cache.FifoCacheConfig{ - MaxSizeItems: 500, - }, prometheus.NewRegistry(), log.NewNopLogger(), stats.ChunkCache) + storeCfg.WriteDedupeCacheConfig.Cache = cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 500}, prometheus.NewRegistry(), log.NewNopLogger(), stats.ChunkCache) return storeCfg }, }, diff --git a/pkg/storage/stores/tsdb/cached_postings_index_test.go b/pkg/storage/stores/tsdb/cached_postings_index_test.go index 7409ec9e53fcb..bc43271f764f8 100644 --- a/pkg/storage/stores/tsdb/cached_postings_index_test.go +++ b/pkg/storage/stores/tsdb/cached_postings_index_test.go @@ -19,8 +19,8 @@ import ( func TestSingleIdxCached(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeMB: 1} - c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") + cfg := cache.EmbeddedCacheConfig{MaxSizeMB: 1} + c := cache.NewEmbeddedCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() cases := []LoadableSeries{ @@ -217,8 +217,8 @@ func TestSingleIdxCached(t *testing.T) { func BenchmarkCacheableTSDBIndex_GetChunkRefs(b *testing.B) { // setup cache. 
- cfg := cache.FifoCacheConfig{MaxSizeMB: 1} - c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") + cfg := cache.EmbeddedCacheConfig{MaxSizeMB: 1} + c := cache.NewEmbeddedCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() now := model.Now() @@ -273,8 +273,8 @@ func BenchmarkCacheableTSDBIndex_GetChunkRefs(b *testing.B) { func TestCacheableTSDBIndex_Stats(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeMB: 1} - c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") + cfg := cache.EmbeddedCacheConfig{MaxSizeMB: 1} + c := cache.NewEmbeddedCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() series := []LoadableSeries{ @@ -388,8 +388,8 @@ func TestCacheableTSDBIndex_Stats(t *testing.T) { func BenchmarkSeriesRepetitive(b *testing.B) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeMB: 1} - c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") + cfg := cache.EmbeddedCacheConfig{MaxSizeMB: 1} + c := cache.NewEmbeddedCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() series := []LoadableSeries{ @@ -443,8 +443,8 @@ func BenchmarkSeriesRepetitive(b *testing.B) { func TestMultipleIndexesFiles(t *testing.T) { // setup cache. - cfg := cache.FifoCacheConfig{MaxSizeMB: 1} - c := cache.NewFifoCache("test-cache", cfg, nil, log.NewNopLogger(), "test") + cfg := cache.EmbeddedCacheConfig{MaxSizeMB: 1} + c := cache.NewEmbeddedCache("test-cache", cfg, nil, log.NewNopLogger(), "test") defer c.Stop() series := []LoadableSeries{ From 7b29a65152a569cad09a06e029d8f5af5dc5b0b2 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Fri, 22 Sep 2023 15:01:45 +0530 Subject: [PATCH 11/14] fixup! rename fifocache to embedded cache --- pkg/loki/modules.go | 4 ++-- pkg/storage/chunk/cache/cache_test.go | 2 +- pkg/storage/chunk/cache/embeddedcache_test.go | 4 ++-- pkg/storage/chunk/tests/caching_fixtures_test.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index e5c6f487b9581..f4c4981ff8ba5 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -583,7 +583,7 @@ func (t *Loki) initStore() (_ services.Service, err error) { switch true { case t.Cfg.isModuleEnabled(Ingester), t.Cfg.isModuleEnabled(Write): - // Use fifo cache for caching index in memory, this also significantly helps performance. + // Use embedded cache for caching index in memory, this also significantly helps performance. t.Cfg.StorageConfig.IndexQueriesCacheConfig = cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ Enabled: true, @@ -591,7 +591,7 @@ func (t *Loki) initStore() (_ services.Service, err error) { // This is a small hack to save some CPU cycles. // We check if the object is still valid after pulling it from cache using the IndexCacheValidity value // however it has to be deserialized to do so, setting the cache validity to some arbitrary amount less than the - // IndexCacheValidity guarantees the FIFO cache will expire the object first which can be done without + // IndexCacheValidity guarantees the Embedded cache will expire the object first which can be done without // having to deserialize the object. 
TTL: t.Cfg.StorageConfig.IndexCacheValidity - 1*time.Minute, }, diff --git a/pkg/storage/chunk/cache/cache_test.go b/pkg/storage/chunk/cache/cache_test.go index efe33c6f94c8f..e65339066ad44 100644 --- a/pkg/storage/chunk/cache/cache_test.go +++ b/pkg/storage/chunk/cache/cache_test.go @@ -204,7 +204,7 @@ func TestMemcache(t *testing.T) { }) } -func TestFifoCache(t *testing.T) { +func TestEmbeddedCache(t *testing.T) { cache := cache.NewEmbeddedCache("test", cache.EmbeddedCacheConfig{MaxSizeItems: 1e3, TTL: 1 * time.Hour}, nil, log.NewNopLogger(), "test") testCache(t, cache) diff --git a/pkg/storage/chunk/cache/embeddedcache_test.go b/pkg/storage/chunk/cache/embeddedcache_test.go index 1b4a6d5406b45..08f71353fd2d0 100644 --- a/pkg/storage/chunk/cache/embeddedcache_test.go +++ b/pkg/storage/chunk/cache/embeddedcache_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestFifoCacheEviction(t *testing.T) { +func TestEmbeddedCacheEviction(t *testing.T) { const ( cnt = 10 evicted = 5 @@ -170,7 +170,7 @@ func TestFifoCacheEviction(t *testing.T) { } } -func TestFifoCacheExpiry(t *testing.T) { +func TestEmbeddedCacheExpiry(t *testing.T) { key1, key2, key3, key4 := "01", "02", "03", "04" data1, data2, data3, data4 := genBytes(32), genBytes(64), genBytes(128), genBytes(32) diff --git a/pkg/storage/chunk/tests/caching_fixtures_test.go b/pkg/storage/chunk/tests/caching_fixtures_test.go index 61c40ef317ec0..194f2b1276e0a 100644 --- a/pkg/storage/chunk/tests/caching_fixtures_test.go +++ b/pkg/storage/chunk/tests/caching_fixtures_test.go @@ -31,7 +31,7 @@ func (f fixture) Clients() (index.Client, client.Client, index.TableClient, conf indexClient, chunkClient, tableClient, schemaConfig, closer, err := f.fixture.Clients() reg := prometheus.NewRegistry() logger := log.NewNopLogger() - indexClient = index.NewCachingIndexClient(indexClient, cache.NewEmbeddedCache("index-fifo", cache.EmbeddedCacheConfig{ + indexClient = index.NewCachingIndexClient(indexClient, cache.NewEmbeddedCache("index-embedded", cache.EmbeddedCacheConfig{ MaxSizeItems: 500, TTL: 5 * time.Minute, }, reg, logger, stats.ChunkCache), 5*time.Minute, limits, logger, false) From 896bfa48493b8f86e31b98ac47ca3205b0f972fb Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Fri, 22 Sep 2023 15:08:10 +0530 Subject: [PATCH 12/14] remove fifo cache from frontend configure docs --- docs/sources/configure/query-frontend.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/sources/configure/query-frontend.md b/docs/sources/configure/query-frontend.md index 5a4d8c96caf0d..3aaef4ef50468 100644 --- a/docs/sources/configure/query-frontend.md +++ b/docs/sources/configure/query-frontend.md @@ -53,11 +53,11 @@ data: results_cache: cache: - # We're going to use the in-process "FIFO" cache - enable_fifocache: true - fifocache: - size: 1024 - validity: 24h + # We're going to use the in-process embedded cache + embedded_cache: + enabled: true + max_size_mb: 100 + ttl: 24h limits_config: max_cache_freshness_per_query: '10m' From 38c32ee7f3ce56a726a7e3b4bc349ec2c0571d73 Mon Sep 17 00:00:00 2001 From: Ashwanth Goli Date: Fri, 22 Sep 2023 17:49:53 +0530 Subject: [PATCH 13/14] fix broken TestNewTripperware_Caches. 
needs improvement (in a follow-up) --- docs/sources/configure/_index.md | 4 ++++ pkg/querier/queryrange/roundtrip_test.go | 29 ++++++++---------------- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index 275c59d2de229..b2371f93c3c85 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -4079,6 +4079,10 @@ embedded_cache: # CLI flag: -.embedded-cache.max-size-mb [max_size_mb: | default = 100] + # Maximum number of entries in the cache. + # CLI flag: -.embedded-cache.max-size-items + [max_size_items: | default = 0] + # The time to live for items in the cache before they get purged. # CLI flag: -.embedded-cache.ttl [ttl: | default = 1h] diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index d54b5965ebc32..3dfe9cc3175d5 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -732,11 +732,10 @@ func TestVolumeTripperware(t *testing.T) { func TestNewTripperware_Caches(t *testing.T) { for _, tc := range []struct { - name string - config Config - numCaches int - equalCaches bool - err string + name string + config Config + numCaches int + err string }{ { name: "results cache disabled, stats cache disabled", @@ -784,9 +783,8 @@ func TestNewTripperware_Caches(t *testing.T) { }, CacheIndexStatsResults: true, }, - numCaches: 2, - equalCaches: true, - err: "", + numCaches: 2, + err: "", }, { name: "results cache enabled, stats cache enabled but different", @@ -814,9 +812,8 @@ func TestNewTripperware_Caches(t *testing.T) { }, }, }, - numCaches: 2, - equalCaches: false, - err: "", + numCaches: 2, + err: "", }, { name: "results cache enabled (no config provided)", @@ -858,19 +855,13 @@ func TestNewTripperware_Caches(t *testing.T) { if s != nil { c, ok := s.(cache.Cache) require.True(t, ok) + + require.NotNil(t, c) caches = append(caches, c) } } require.Equal(t, tc.numCaches, len(caches)) - - if tc.numCaches == 2 { - if tc.equalCaches { - require.Equal(t, caches[0], caches[1]) - } else { - require.NotEqual(t, caches[0], caches[1]) - } - } }) } } From 6941b1159fa89043a756a1135283697f18c33e74 Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Fri, 22 Sep 2023 18:27:35 +0530 Subject: [PATCH 14/14] Update docs/sources/configure/query-frontend.md Co-authored-by: Christian Haudum --- docs/sources/configure/query-frontend.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/configure/query-frontend.md b/docs/sources/configure/query-frontend.md index 3aaef4ef50468..a10454aed284b 100644 --- a/docs/sources/configure/query-frontend.md +++ b/docs/sources/configure/query-frontend.md @@ -53,7 +53,7 @@ data: results_cache: cache: - # We're going to use the in-process embedded cache + # We're going to use the embedded cache embedded_cache: enabled: true max_size_mb: 100
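
For reference, a minimal sketch of how the renamed embedded cache can be exercised directly, using only the constructor, config fields, and methods introduced by this series. This is not part of the patch: the example values, the `main` package wrapper, and the import paths are assumptions for illustration only.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-kit/log"

	"github.com/grafana/loki/pkg/storage/chunk/cache"
)

func main() {
	// Both eviction bounds from EmbeddedCacheConfig may be set; whichever
	// limit is reached first triggers eviction from the back of the LRU list.
	cfg := cache.EmbeddedCacheConfig{
		MaxSizeMB:    10,              // memory bound, in MB
		MaxSizeItems: 1000,            // entry-count bound
		TTL:          5 * time.Minute, // expired entries are pruned by the background job
	}

	// nil Registerer and a nop logger, as in the tests touched by this series;
	// the last argument is the stats cache-type label.
	c := cache.NewEmbeddedCache("example", cfg, nil, log.NewNopLogger(), "example")
	defer c.Stop()

	ctx := context.Background()
	if err := c.Store(ctx, []string{"a", "b"}, [][]byte{[]byte("1"), []byte("2")}); err != nil {
		panic(err)
	}

	// Fetch reports which keys were found, their values, and which were missing.
	found, bufs, missing, _ := c.Fetch(ctx, []string{"a", "b", "c"})
	fmt.Println(found, len(bufs), missing) // [a b] 2 [c]
}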