diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index cf9bb386c1e1d..8d6bc0715f494 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -1263,19 +1263,20 @@ planner:
   # CLI flag: -bloom-build.planner.interval
   [planning_interval: <duration> | default = 8h]
 
-  # Newest day-table offset (from today, inclusive) to build blooms for.
-  # Increase to lower cost by not re-writing data to object storage too
-  # frequently since recent data changes more often at the cost of not having
-  # blooms available as quickly.
+  # Newest day-table offset (from today, inclusive) to build blooms for. 0
+  # starts building from today, 1 from yesterday, and so on. Increase to lower
+  # cost by not re-writing data to object storage too frequently since recent
+  # data changes more often at the cost of not having blooms available as quickly.
   # CLI flag: -bloom-build.planner.min-table-offset
-  [min_table_offset: <int> | default = 1]
+  [min_table_offset: <int> | default = 0]
 
-  # Oldest day-table offset (from today, inclusive) to compact. This can be used
-  # to lower cost by not trying to compact older data which doesn't change. This
-  # can be optimized by aligning it with the maximum
-  # `reject_old_samples_max_age` setting of any tenant.
+  # Oldest day-table offset (from today, inclusive) to build blooms for. 1
+  # builds up to and including yesterday, 2 up to the day before yesterday, and
+  # so on. This can be used to lower cost by not trying to build blooms for
+  # older data which doesn't change. This can be optimized by aligning it with
+  # the maximum `reject_old_samples_max_age` setting of any tenant.
   # CLI flag: -bloom-build.planner.max-table-offset
-  [max_table_offset: <int> | default = 2]
+  [max_table_offset: <int> | default = 1]
 
 retention:
   # Enable bloom retention.
diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go
index 12dc16935961a..f6be8322f74d5 100644
--- a/pkg/bloombuild/planner/config.go
+++ b/pkg/bloombuild/planner/config.go
@@ -21,14 +21,14 @@ type Config struct {
 // RegisterFlagsWithPrefix registers flags for the bloom-planner configuration.
 func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
 	f.DurationVar(&cfg.PlanningInterval, prefix+".interval", 8*time.Hour, "Interval at which to re-run the bloom creation planning.")
-	f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 1, "Newest day-table offset (from today, inclusive) to build blooms for. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.")
+	f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 0, "Newest day-table offset (from today, inclusive) to build blooms for. 0 starts building from today, 1 from yesterday, and so on. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.")
 	// TODO(owen-d): ideally we'd set this per tenant based on their `reject_old_samples_max_age` setting,
 	// but due to how we need to discover tenants, we can't do that yet. Tenant+Period discovery is done by
 	// iterating the table periods in object storage and looking for tenants within that period.
 	// In order to have this done dynamically, we'd need to account for tenant specific overrides, which are also
 	// dynamically reloaded.
 	// I'm doing it the simple way for now.
-	f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 2, "Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.")
+	f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 1, "Oldest day-table offset (from today, inclusive) to build blooms for. 1 builds up to and including yesterday, 2 up to the day before yesterday, and so on. This can be used to lower cost by not trying to build blooms for older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.")
 	cfg.RetentionConfig.RegisterFlagsWithPrefix(prefix+".retention", f)
 	cfg.Queue.RegisterFlagsWithPrefix(prefix+".queue", f)
 }
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
index a83e93f28f545..656a0b533c3c2 100644
--- a/pkg/bloombuild/planner/planner_test.go
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/grafana/loki/v3/pkg/bloombuild/planner/queue"
 	"github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies"
 	"github.com/grafana/loki/v3/pkg/bloombuild/protos"
+	iter "github.com/grafana/loki/v3/pkg/iter/v2"
 	"github.com/grafana/loki/v3/pkg/storage"
 	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
 	"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -606,6 +607,36 @@ func Test_deleteOutdatedMetas(t *testing.T) {
 	}
 }
 
+func TestMinMaxTables(t *testing.T) {
+	logger := log.NewNopLogger()
+
+	cfg := Config{
+		PlanningInterval: 1 * time.Hour,
+		Queue: queue.Config{
+			MaxQueuedTasksPerTenant: 10000,
+		},
+		// From today (offset 0) back to the day before yesterday (offset 2).
+		MinTableOffset: 0,
+		MaxTableOffset: 2,
+	}
+	planner := createPlanner(t, cfg, &fakeLimits{}, logger)
+
+	tables := planner.tables(time.Now())
+	require.Equal(t, 3, tables.TotalDays())
+
+	dayTables, err := iter.Collect(tables)
+	require.NoError(t, err)
+
+	todayTable := config.NewDayTable(config.NewDayTime(model.Now()), "index_")
+	yesterdayTable := config.NewDayTable(config.NewDayTime(model.Now().Add(-24*time.Hour)), "index_")
+	dayBeforeYesterdayTable := config.NewDayTable(config.NewDayTime(model.Now().Add(-48*time.Hour)), "index_")
+
+	// Tables are iterated oldest first.
+	require.Equal(t, dayBeforeYesterdayTable.Addr(), dayTables[0].Addr())
+	require.Equal(t, yesterdayTable.Addr(), dayTables[1].Addr())
+	require.Equal(t, todayTable.Addr(), dayTables[2].Addr())
+}
+
 type fakeBuilder struct {
 	mx         sync.Mutex // Protects tasks and currTaskIdx.
 	id         string
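
For readers skimming the change, here is a minimal, self-contained sketch of what the two offsets mean in practice. It is not the planner's actual implementation: the `dayTablesFor` helper is hypothetical, and the days-since-Unix-epoch table addressing is an illustrative assumption (the real code goes through `config.NewDayTable`, as in the test above).

```go
// A minimal sketch, assuming day tables are named "<prefix><days since epoch>"
// (e.g. index_19876) and that the planner walks tables oldest first.
// dayTablesFor is a hypothetical helper, not Loki's planner code.
package main

import (
	"fmt"
	"time"
)

// dayTablesFor returns the day-table names covered by the given offsets,
// oldest first. minOffset is the newest day (0 = today), maxOffset the
// oldest day, both inclusive.
func dayTablesFor(now time.Time, minOffset, maxOffset int, prefix string) []string {
	const secondsPerDay = 24 * 60 * 60
	var tables []string
	for offset := maxOffset; offset >= minOffset; offset-- {
		day := now.AddDate(0, 0, -offset)
		tables = append(tables, fmt.Sprintf("%s%d", prefix, day.Unix()/secondsPerDay))
	}
	return tables
}

func main() {
	// With this PR's new defaults (min-table-offset=0, max-table-offset=1),
	// the range covers two tables: yesterday's and today's.
	for _, tbl := range dayTablesFor(time.Now(), 0, 1, "index_") {
		fmt.Println(tbl)
	}
}
```

Under these assumptions, the old defaults (min=1, max=2) covered yesterday and the day before, while the new defaults (min=0, max=1) shift the window forward so blooms are built for today's table as well, which is the behavioral intent of the diff.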