Trigger compaction prior to retention. #3665

Merged · 9 commits · Apr 30, 2021
Changes from 8 commits
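
Summary (inferred from the diff, since no description is shown): this PR folds retention marking into the compaction loop instead of running it on its own ticker. The retention_interval setting, the RunRetention method and its goroutine, and the retention_operation_* metrics are removed; newTable now receives the table marker so each pass compacts and then marks, and the default compaction_interval drops from 2h to 10m.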
113 changes: 28 additions & 85 deletions pkg/storage/stores/shipper/compactor/compactor.go
@@ -22,7 +22,6 @@ import (
     "github.com/grafana/loki/pkg/storage/stores/shipper/compactor/retention"
     shipper_util "github.com/grafana/loki/pkg/storage/stores/shipper/util"
     "github.com/grafana/loki/pkg/storage/stores/util"
-    errUtil "github.com/grafana/loki/pkg/util"
 )
 
 const delimiter = "/"
@@ -33,7 +32,6 @@ type Config struct {
     SharedStoreKeyPrefix     string        `yaml:"shared_store_key_prefix"`
     CompactionInterval       time.Duration `yaml:"compaction_interval"`
     RetentionEnabled         bool          `yaml:"retention_enabled"`
-    RetentionInterval        time.Duration `yaml:"retention_interval"`
     RetentionDeleteDelay     time.Duration `yaml:"retention_delete_delay"`
     RetentionDeleteWorkCount int           `yaml:"retention_delete_worker_count"`
 }
@@ -43,8 +41,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
     f.StringVar(&cfg.WorkingDirectory, "boltdb.shipper.compactor.working-directory", "", "Directory where files can be downloaded for compaction.")
     f.StringVar(&cfg.SharedStoreType, "boltdb.shipper.compactor.shared-store", "", "Shared store used for storing boltdb files. Supported types: gcs, s3, azure, swift, filesystem")
     f.StringVar(&cfg.SharedStoreKeyPrefix, "boltdb.shipper.compactor.shared-store.key-prefix", "index/", "Prefix to add to Object Keys in Shared store. Path separator(if any) should always be a '/'. Prefix should never start with a separator but should always end with it.")
-    f.DurationVar(&cfg.CompactionInterval, "boltdb.shipper.compactor.compaction-interval", 2*time.Hour, "Interval at which to re-run the compaction operation.")
-    f.DurationVar(&cfg.RetentionInterval, "boltdb.shipper.compactor.retention-interval", 10*time.Minute, "Interval at which to re-run the retention operation.")
+    f.DurationVar(&cfg.CompactionInterval, "boltdb.shipper.compactor.compaction-interval", 10*time.Minute, "Interval at which to re-run the compaction operation.")
     f.DurationVar(&cfg.RetentionDeleteDelay, "boltdb.shipper.compactor.retention-delete-delay", 2*time.Hour, "Delay after which chunks will be fully deleted during retention.")
     f.BoolVar(&cfg.RetentionEnabled, "boltdb.shipper.compactor.retention-enabled", false, "(Experimental) Activate custom (per-stream,per-tenant) retention.")
     f.IntVar(&cfg.RetentionDeleteWorkCount, "boltdb.shipper.compactor.retention-delete-worker-count", 150, "The total amount of worker to use to delete chunks.")
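
Net effect of the flag changes above: the separate retention-interval knob is gone, and the compaction-interval default tightens from 2 hours to 10 minutes, because retention marking now runs at the compaction cadence. For reference, the resulting defaults (they match the updated TestIsDefaults further down) would look like this as a Go literal — illustrative, not code from the PR:

cfg := Config{
    SharedStoreKeyPrefix:     "index/",
    CompactionInterval:       10 * time.Minute, // was 2 * time.Hour; now also paces retention marking
    RetentionDeleteDelay:     2 * time.Hour,
    RetentionDeleteWorkCount: 150,
}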
@@ -65,10 +62,9 @@ type Compactor struct {
 
     cfg          Config
     objectClient chunk.ObjectClient
-    tableMarker  *retention.Marker
+    tableMarker  retention.TableMarker
     sweeper      *retention.Sweeper
-
-    metrics *metrics
+    metrics      *metrics
 }
 
 func NewCompactor(cfg Config, storageConfig storage.Config, schemaConfig loki_storage.SchemaConfig, limits retention.Limits, r prometheus.Registerer) (*Compactor, error) {
@@ -93,20 +89,20 @@ func NewCompactor(cfg Config, storageConfig storage.Config, schemaConfig loki_storage.SchemaConfig, limits retention.Limits, r prometheus.Registerer) (*Compactor, error) {
     if err != nil {
         return nil, err
     }
-    marker, err := retention.NewMarker(retentionWorkDir, schemaConfig, prefixedClient, retention.NewExpirationChecker(limits), r)
-    if err != nil {
-        return nil, err
-    }
-    compactor := Compactor{
+
+    compactor := &Compactor{
         cfg:          cfg,
         objectClient: prefixedClient,
         metrics:      newMetrics(r),
-        tableMarker:  marker,
         sweeper:      sweeper,
     }
 
+    marker, err := retention.NewMarker(retentionWorkDir, schemaConfig, retention.NewExpirationChecker(limits), r)
+    if err != nil {
+        return nil, err
+    }
+    compactor.tableMarker = marker
     compactor.Service = services.NewBasicService(nil, compactor.loop, nil)
-    return &compactor, nil
+    return compactor, nil
 }
 
 func (c *Compactor) loop(ctx context.Context) error {
@@ -116,12 +112,6 @@ func (c *Compactor) loop(ctx context.Context) error {
             level.Error(util_log.Logger).Log("msg", "failed to run compaction", "err", err)
         }
     }
-    runRetention := func() {
-        err := c.RunRetention(ctx)
-        if err != nil {
-            level.Error(util_log.Logger).Log("msg", "failed to run retention", "err", err)
-        }
-    }
     var wg sync.WaitGroup
     wg.Add(1)
     go func() {
@@ -141,7 +131,7 @@ func (c *Compactor) loop(ctx context.Context) error {
         }
     }()
     if c.cfg.RetentionEnabled {
-        wg.Add(2)
+        wg.Add(1)
         go func() {
             // starts the chunk sweeper
             defer func() {
@@ -151,27 +141,27 @@ func (c *Compactor) loop(ctx context.Context) error {
             c.sweeper.Start()
             <-ctx.Done()
         }()
-        go func() {
-            // start the index marker
-            defer wg.Done()
-            ticker := time.NewTicker(c.cfg.RetentionInterval)
-            defer ticker.Stop()
-
-            for {
-                select {
-                case <-ticker.C:
-                    runRetention()
-                case <-ctx.Done():
-                    return
-                }
-            }
-        }()
     }
 
     wg.Wait()
     return nil
 }
 
+func (c *Compactor) CompactTable(ctx context.Context, tableName string) error {
+    table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), c.objectClient, c.cfg.RetentionEnabled, c.tableMarker)
+    if err != nil {
+        level.Error(util_log.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err)
+        return err
+    }
+
+    err = table.compact()
+    if err != nil {
+        level.Error(util_log.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err)
+        return err
+    }
+    return nil
+}
+
 func (c *Compactor) RunCompaction(ctx context.Context) error {
     status := statusSuccess
     start := time.Now()
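
Note on the hunk above: CompactTable is the new single entry point for a table, and newTable now receives c.cfg.RetentionEnabled and c.tableMarker, so retention marking happens inside the compaction pass rather than in a separate RunRetention sweep — compaction always runs prior to retention, per the PR title. The new retention.TableMarker interface itself is not shown in this diff; the sketch below assumes a MarkForDelete method shaped like the call in the removed RunRetention code, and every name in it is illustrative:

package main

import (
    "context"
    "fmt"
)

// TableMarker is assumed from the removed RunRetention code, which called
// c.tableMarker.MarkForDelete(ctx, tableName); the real interface in the PR
// may differ.
type TableMarker interface {
    MarkForDelete(ctx context.Context, tableName string) error
}

// noopMarker is a stand-in implementation for the sketch.
type noopMarker struct{}

func (noopMarker) MarkForDelete(_ context.Context, tableName string) error {
    fmt.Println("marking", tableName, "for delete")
    return nil
}

// compactTable mirrors the shape of the new Compactor.CompactTable: compact
// first, then mark the freshly compacted table when retention is enabled.
func compactTable(ctx context.Context, tableName string, retentionEnabled bool, marker TableMarker) error {
    fmt.Println("compacting", tableName) // stands in for table.compact()
    if retentionEnabled {
        if err := marker.MarkForDelete(ctx, tableName); err != nil {
            return fmt.Errorf("mark %s for delete: %w", tableName, err)
        }
    }
    return nil
}

func main() {
    if err := compactTable(context.Background(), "index_2656", true, noopMarker{}); err != nil {
        fmt.Println("error:", err)
    }
}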
@@ -196,19 +186,9 @@ func (c *Compactor) RunCompaction(ctx context.Context) error {
     }
 
     for _, tableName := range tables {
-        table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), c.objectClient)
-        if err != nil {
-            status = statusFailure
-            level.Error(util_log.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err)
-            continue
-        }
-
-        err = table.compact()
-        if err != nil {
+        if err := c.CompactTable(ctx, tableName); err != nil {
             status = statusFailure
-            level.Error(util_log.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err)
         }
-
         // check if context was cancelled before going for next table.
         select {
         case <-ctx.Done():
@@ -219,40 +199,3 @@ func (c *Compactor) RunCompaction(ctx context.Context) error {
 
     return nil
 }
-
-func (c *Compactor) RunRetention(ctx context.Context) error {
-    status := statusSuccess
-    start := time.Now()
-
-    defer func() {
-        level.Debug(util_log.Logger).Log("msg", "finished to processing retention on all tables", "status", status, "duration", time.Since(start))
-        c.metrics.retentionOperationTotal.WithLabelValues(status).Inc()
-        if status == statusSuccess {
-            c.metrics.retentionOperationDurationSeconds.Set(time.Since(start).Seconds())
-            c.metrics.retentionOperationLastSuccess.SetToCurrentTime()
-        }
-    }()
-    level.Debug(util_log.Logger).Log("msg", "starting to processing retention on all all tables")
-
-    _, dirs, err := c.objectClient.List(ctx, "", delimiter)
-    if err != nil {
-        status = statusFailure
-        return err
-    }
-
-    tables := make([]string, len(dirs))
-    for i, dir := range dirs {
-        tables[i] = strings.TrimSuffix(string(dir), delimiter)
-    }
-
-    var errs errUtil.MultiError
-
-    for _, tableName := range tables {
-        if err := c.tableMarker.MarkForDelete(ctx, tableName); err != nil {
-            level.Error(util_log.Logger).Log("msg", "failed to mark table for deletes", "table", tableName, "err", err)
-            errs.Add(err)
-            status = statusFailure
-        }
-    }
-    return errs.Err()
-}
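
With RunRetention deleted, loop() drives everything off the single CompactionInterval ticker: RunCompaction iterates the tables and CompactTable both compacts and marks each one, so the only extra goroutine under RetentionEnabled is the chunk sweeper — hence wg.Add(2) becoming wg.Add(1). A minimal self-contained sketch of the consolidated loop; interval and bodies are placeholders, not the PR's code:

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// loop sketches the consolidated control flow: one ticker paces compaction
// (which now also applies retention marking) and the sweeper simply runs
// until the context is cancelled.
func loop(ctx context.Context, interval time.Duration, retentionEnabled bool) {
    runCompaction := func() { fmt.Println("compact + mark all tables") }

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                runCompaction()
            case <-ctx.Done():
                return
            }
        }
    }()

    if retentionEnabled {
        wg.Add(1) // just the sweeper now; the dedicated marker goroutine is gone
        go func() {
            defer wg.Done()
            fmt.Println("sweeper started")
            <-ctx.Done()
        }()
    }

    wg.Wait()
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
    defer cancel()
    loop(ctx, 100*time.Millisecond, true)
}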
3 changes: 1 addition & 2 deletions pkg/storage/stores/shipper/compactor/compactor_test.go
@@ -19,8 +19,7 @@ func TestIsDefaults(t *testing.T) {
         {&Config{}, false},
         {&Config{
             SharedStoreKeyPrefix:     "index/",
-            CompactionInterval:       2 * time.Hour,
-            RetentionInterval:        10 * time.Minute,
+            CompactionInterval:       10 * time.Minute,
             RetentionDeleteDelay:     2 * time.Hour,
             RetentionDeleteWorkCount: 150,
         }, true},
20 changes: 0 additions & 20 deletions pkg/storage/stores/shipper/compactor/metrics.go
@@ -14,10 +14,6 @@ type metrics struct {
     compactTablesOperationTotal           *prometheus.CounterVec
     compactTablesOperationDurationSeconds prometheus.Gauge
     compactTablesOperationLastSuccess     prometheus.Gauge
-
-    retentionOperationTotal           *prometheus.CounterVec
-    retentionOperationDurationSeconds prometheus.Gauge
-    retentionOperationLastSuccess     prometheus.Gauge
 }
 
 func newMetrics(r prometheus.Registerer) *metrics {
@@ -37,22 +33,6 @@ func newMetrics(r prometheus.Registerer) *metrics {
             Name:      "compact_tables_operation_last_successful_run_timestamp_seconds",
             Help:      "Unix timestamp of the last successful compaction run",
         }),
-
-        retentionOperationTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-            Namespace: "loki_boltdb_shipper",
-            Name:      "retention_operation_total",
-            Help:      "Total number of retention applied by status",
-        }, []string{"status"}),
-        retentionOperationDurationSeconds: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: "loki_boltdb_shipper",
-            Name:      "retention_operation_duration_seconds",
-            Help:      "Time (in seconds) spent in applying retention for all the tables",
-        }),
-        retentionOperationLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-            Namespace: "loki_boltdb_shipper",
-            Name:      "retention_operation_last_successful_run_timestamp_seconds",
-            Help:      "Unix timestamp of the last successful retention run",
-        }),
     }
 
     return &m
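
Operational note: the loki_boltdb_shipper_retention_operation_total, retention_operation_duration_seconds, and retention_operation_last_successful_run_timestamp_seconds series disappear with this PR. Retention health is now visible only through the surviving compact_tables_operation_* metrics, whose status label also captures retention-marking failures, since a CompactTable error sets statusFailure in RunCompaction.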