Merge branch 'upstream/main' into enforce-query-tenancy
Signed-off-by: Jacob Baungard Hansen <jacobbaungard@redhat.com>
jacobbaungard committed Dec 18, 2023
2 parents 56dd9fb + 96b8dba commit 0f8f998
Showing 81 changed files with 1,588 additions and 607 deletions.
12 changes: 6 additions & 6 deletions .busybox-versions
@@ -1,7 +1,7 @@
# Auto generated by busybox-updater.sh. DO NOT EDIT
amd64=393d14abb68b8b2d88304c72ac25b5ce130aa3a1d57ba7363e2c4d07d294513d
arm64=9fe410fe5b8f283d057939a5b0a6f464ecb4bfe4a07d132d2846cfbe82cf43ea
arm=a237b18458d6bcc8964e59ced627ea46eb9aae68875ea833c61d5050a742e624
ppc64le=cbb9892625fd0d4c625afe8255fe35699a163bc4d74925dfcca74ee7cc43d4ba
riscv64=fa1350d80e4481d3671d808fbe239e4075205f69c940e7e85711bdc39bf8e181
s390x=1e3e5a05847ad67da2b148d952931cf6f716a334ab06ea00742560a2ff985c7d
amd64=f173c44fab35484fa0e940e42929efe2a2f506feda431ba72c5f0d79639d7f55
arm64=6277ab6abe348994989b3959d7c125d7a487012aedb80570ec28652a012c69d6
arm=31533906c9eadc190de436bcbc021207d90839777c1b95991edd15e7df5d34ad
ppc64le=ace2ea29bf8e4267c293ed5570df249039a0bc3949d3371429a71cf114e8a9e2
riscv64=ff38cae5b5ed16251631e55156a7d92977bf5a8c4714ff6e3c333f7acb0297e0
s390x=59d0ed3060aef57d1b23bc353a2223af24a6e1d035486647eb599a77ff2d446e
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -17,7 +17,13 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
### Added

- [#6756](https://github.com/thanos-io/thanos/pull/6756) Query: Add the following options to allow enforcement of tenancy on the query path: `query.enable-tenancy`, `query.tenant-label-name` (see the illustrative sketch after this list).
- [#6944](https://github.com/thanos-io/thanos/pull/6944) Receive: Added a new flag, `--tsdb.max-retention-bytes`, for maximum retention bytes.
- [#6891](https://github.com/thanos-io/thanos/pull/6891) Objstore: Bump `objstore` which adds support for Azure Workload Identity.
- [#6453](https://github.com/thanos-io/thanos/pull/6453) Sidecar: Added `--reloader.method` to support configuration reloads via the SIGHUP signal.
- [#6925](https://github.com/thanos-io/thanos/pull/6925) Store Gateway: Support float native histograms.
- [#6954](https://github.com/thanos-io/thanos/pull/6954) Index Cache: Support tracing for fetch APIs.
- [#6943](https://github.com/thanos-io/thanos/pull/6943) Ruler: Added the `keep_firing_for` field to alerting rules.
- [#6972](https://github.com/thanos-io/thanos/pull/6972) Store Gateway: When lazy postings is enabled, apply the series limit while streaming series, counting only series that actually match.
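
For context on the first entry, here is a purely illustrative sketch of the general idea behind enforcing tenancy on the query path: rewrite every selector in a PromQL expression so it carries a tenant-label matcher. The `enforceTenant` helper, the `tenant_id` label name, and the hard-coded tenant are assumptions for illustration; this is not the implementation added in #6756. Only the Prometheus parser and labels packages are real.

```go
// Hypothetical sketch: append a tenant-label matcher to every vector selector
// in a PromQL query. Not the Thanos implementation; label and tenant values
// are made up for illustration.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"
)

// enforceTenant rewrites query so every selector also matches tenantLabel=tenant.
func enforceTenant(query, tenantLabel, tenant string) (string, error) {
	expr, err := parser.ParseExpr(query)
	if err != nil {
		return "", err
	}
	matcher := labels.MustNewMatcher(labels.MatchEqual, tenantLabel, tenant)
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if vs, ok := node.(*parser.VectorSelector); ok {
			vs.LabelMatchers = append(vs.LabelMatchers, matcher)
		}
		return nil
	})
	return expr.String(), nil
}

func main() {
	q, err := enforceTenant(`sum(rate(http_requests_total[5m]))`, "tenant_id", "team-a")
	if err != nil {
		panic(err)
	}
	// Prints something like: sum(rate(http_requests_total{tenant_id="team-a"}[5m]))
	fmt.Println(q)
}
```

Presumably, `query.tenant-label-name` controls which label is enforced in the real code path.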

### Changed

3 changes: 1 addition & 2 deletions MAINTAINERS.md
@@ -10,8 +10,7 @@
| Prem Saraswat | prmsrswt@gmail.com | `@Prem Saraswat` | [@onprem](https://github.com/onprem) | Red Hat |
| Matthias Loibl | mail@matthiasloibl.com | `@metalmatze` | [@metalmatze](https://github.com/metalmatze) | Polar Signals |
| Ben Ye | yb532204897@gmail.com | `@yeya24` | [@yeya24](https://github.com/yeya24) | Amazon Web Services |
| Wiard van Rij | wiard@outlook.com | `@wiard van Rij` | [@wiardvanrij](https://github.com/wiardvanrij) | Roku |
| Matej Gera | matejgera@gmail.com | `@Matej Gera` | [@matej-g](https://github.com/matej-g) | Red Hat |
| Matej Gera | matejgera@gmail.com | `@Matej Gera` | [@matej-g](https://github.com/matej-g) | Coralogix |
| Filip Petkovski | filip.petkovsky@gmail.com | `@Filip Petkovski` | [@fpetkovski](https://github.com/fpetkovski) | Shopify |
| Saswata Mukherjee | saswata.mukhe@gmail.com | `@saswatamcode` | [@saswatamcode](https://github.com/saswatamcode) | Red Hat |
| Michael Hoffmann | mhoffm@posteo.de | `@Michael Hoffmann` | [@MichaHoffmann](https://github.com/MichaHoffmann) | Aiven |
3 changes: 2 additions & 1 deletion cmd/thanos/compact.go
@@ -239,7 +239,8 @@ func runCompact(
consistencyDelayMetaFilter := block.NewConsistencyDelayMetaFilter(logger, conf.consistencyDelay, extprom.WrapRegistererWithPrefix("thanos_", reg))
timePartitionMetaFilter := block.NewTimePartitionMetaFilter(conf.filterConf.MinTime, conf.filterConf.MaxTime)

baseMetaFetcher, err := block.NewBaseFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, conf.dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg))
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
baseMetaFetcher, err := block.NewBaseFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, baseBlockIDsFetcher, conf.dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg))
if err != nil {
return errors.Wrap(err, "create meta fetcher")
}
16 changes: 16 additions & 0 deletions cmd/thanos/config.go
@@ -114,8 +114,18 @@ type reloaderConfig struct {
ruleDirectories []string
watchInterval time.Duration
retryInterval time.Duration
method string
processName string
}

const (
// HTTPReloadMethod reloads the configuration using the HTTP reload endpoint.
HTTPReloadMethod = "http"

// SignalReloadMethod reloads the configuration sending a SIGHUP signal to the process.
SignalReloadMethod = "signal"
)

func (rc *reloaderConfig) registerFlag(cmd extkingpin.FlagClause) *reloaderConfig {
cmd.Flag("reloader.config-file",
"Config file watched by the reloader.").
@@ -132,6 +142,12 @@ func (rc *reloaderConfig) registerFlag(cmd extkingpin.FlagClause) *reloaderConfi
cmd.Flag("reloader.retry-interval",
"Controls how often reloader retries config reload in case of error.").
Default("5s").DurationVar(&rc.retryInterval)
cmd.Flag("reloader.method",
"Method used to reload the configuration.").
Default(HTTPReloadMethod).EnumVar(&rc.method, HTTPReloadMethod, SignalReloadMethod)
cmd.Flag("reloader.process-name",
"Executable name used to match the process being reloaded when using the signal method.").
Default("prometheus").StringVar(&rc.processName)

return rc
}
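
To make the new `signal` method concrete, below is a minimal, hypothetical sketch (not the Thanos reloader code) of what a signal-based reload can amount to: locate the target process by executable name, which is what the `--reloader.process-name` flag above configures (defaulting to `prometheus`), and send it SIGHUP. The `reloadBySignal` helper and its reliance on `pgrep` are assumptions for illustration.

```go
// Hypothetical sketch of a signal-based reload: find a process by executable
// name and send it SIGHUP. Not the Thanos reloader implementation.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"syscall"
)

// reloadBySignal sends SIGHUP to every PID that pgrep reports for processName.
func reloadBySignal(processName string) error {
	out, err := exec.Command("pgrep", "-x", processName).Output()
	if err != nil {
		return fmt.Errorf("finding process %q: %w", processName, err)
	}
	for _, field := range strings.Fields(string(out)) {
		pid, err := strconv.Atoi(field)
		if err != nil {
			return fmt.Errorf("parsing pid %q: %w", field, err)
		}
		proc, err := os.FindProcess(pid)
		if err != nil {
			return err
		}
		if err := proc.Signal(syscall.SIGHUP); err != nil {
			return fmt.Errorf("signalling pid %d: %w", pid, err)
		}
	}
	return nil
}

func main() {
	// "prometheus" matches the default of --reloader.process-name above.
	if err := reloadBySignal("prometheus"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
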
3 changes: 2 additions & 1 deletion cmd/thanos/downsample.go
@@ -90,7 +90,8 @@ func RunDownsample(
insBkt := objstoretracing.WrapWithTraces(objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", reg), bkt.Name()))

// While fetching blocks, filter out blocks that were marked for no downsample.
metaFetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix("thanos_", reg), []block.MetadataFilter{
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
metaFetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix("thanos_", reg), []block.MetadataFilter{
block.NewDeduplicateFilter(block.FetcherConcurrency),
downsample.NewGatherNoDownsampleMarkFilter(logger, insBkt, block.FetcherConcurrency),
})
6 changes: 4 additions & 2 deletions cmd/thanos/main_test.go
@@ -157,7 +157,8 @@ func TestRegression4960_Deadlock(t *testing.T) {

metrics := newDownsampleMetrics(prometheus.NewRegistry())
testutil.Equals(t, 0.0, promtest.ToFloat64(metrics.downsamples.WithLabelValues(meta.Thanos.GroupKey())))
metaFetcher, err := block.NewMetaFetcher(nil, block.FetcherConcurrency, bkt, "", nil, nil)
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, bkt)
metaFetcher, err := block.NewMetaFetcher(nil, block.FetcherConcurrency, bkt, baseBlockIDsFetcher, "", nil, nil)
testutil.Ok(t, err)

metas, _, err := metaFetcher.Fetch(ctx)
@@ -196,7 +197,8 @@ func TestCleanupDownsampleCacheFolder(t *testing.T) {

metrics := newDownsampleMetrics(prometheus.NewRegistry())
testutil.Equals(t, 0.0, promtest.ToFloat64(metrics.downsamples.WithLabelValues(meta.Thanos.GroupKey())))
metaFetcher, err := block.NewMetaFetcher(nil, block.FetcherConcurrency, bkt, "", nil, nil)
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, bkt)
metaFetcher, err := block.NewMetaFetcher(nil, block.FetcherConcurrency, bkt, baseBlockIDsFetcher, "", nil, nil)
testutil.Ok(t, err)

metas, _, err := metaFetcher.Fetch(ctx)
5 changes: 4 additions & 1 deletion cmd/thanos/query.go
@@ -261,7 +261,10 @@ func registerQuery(app *extkingpin.App) {
Files: *fileSDFiles,
RefreshInterval: *fileSDInterval,
}
fileSD = file.NewDiscovery(conf, logger)
var err error
if fileSD, err = file.NewDiscovery(conf, logger, reg); err != nil {
return err
}
}

if *webRoutePrefix == "" {
11 changes: 11 additions & 0 deletions cmd/thanos/receive.go
@@ -10,6 +10,7 @@ import (
"strings"
"time"

"github.com/alecthomas/units"
extflag "github.com/efficientgo/tools/extkingpin"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -84,6 +85,7 @@ func registerReceive(app *extkingpin.App) {
MaxBlockDuration: int64(time.Duration(*conf.tsdbMaxBlockDuration) / time.Millisecond),
RetentionDuration: int64(time.Duration(*conf.retention) / time.Millisecond),
OutOfOrderTimeWindow: int64(time.Duration(*conf.tsdbOutOfOrderTimeWindow) / time.Millisecond),
MaxBytes: int64(conf.tsdbMaxBytes),
OutOfOrderCapMax: conf.tsdbOutOfOrderCapMax,
NoLockfile: conf.noLockFile,
WALCompression: wlog.ParseCompressionType(conf.walCompression, string(wlog.CompressionSnappy)),
@@ -165,6 +167,8 @@ func runReceive(
// Has this thanos receive instance been configured to ingest metrics into a local TSDB?
enableIngestion := receiveMode == receive.IngestorOnly || receiveMode == receive.RouterIngestor

isOOOEnabled := tsdbOpts.OutOfOrderTimeWindow > 0

upload := len(confContentYaml) > 0
if enableIngestion {
if upload {
@@ -174,6 +178,9 @@
"Compaction needs to be disabled (tsdb.min-block-duration = tsdb.max-block-duration)", tsdbOpts.MaxBlockDuration, tsdbOpts.MinBlockDuration)
}
level.Warn(logger).Log("msg", "flag to ignore min/max block duration flags differing is being used. If the upload of a 2h block fails and a tsdb compaction happens that block may be missing from your Thanos bucket storage.")
if isOOOEnabled {
level.Warn(logger).Log("msg", "out-of-order support is also enabled which means that Receiver will now upload compacted blocks to not lose any data. Vertical compaction needs to be enabled on Compactor! See https://github.com/prometheus/prometheus/issues/13112")
}
}
// The background shipper continuously scans the data directory and uploads
// new blocks to object storage service.
@@ -211,6 +218,7 @@ func runReceive(
conf.tenantLabelName,
bkt,
conf.allowOutOfOrderUpload,
isOOOEnabled,
hashFunc,
)
writer := receive.NewWriter(log.With(logger, "component", "receive-writer"), dbs, &receive.WriterOptions{
@@ -809,6 +817,7 @@ type receiveConfig struct {
tsdbOutOfOrderCapMax int64
tsdbAllowOverlappingBlocks bool
tsdbMaxExemplars int64
tsdbMaxBytes units.Base2Bytes
tsdbWriteQueueSize int64
tsdbMemorySnapshotOnShutdown bool
tsdbEnableNativeHistograms bool
@@ -916,6 +925,8 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) {

cmd.Flag("tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge. Does not do anything, enabled all the time.").Default("false").BoolVar(&rc.tsdbAllowOverlappingBlocks)

cmd.Flag("tsdb.max-retention-bytes", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").Default("0").BytesVar(&rc.tsdbMaxBytes)

cmd.Flag("tsdb.wal-compression", "Compress the tsdb WAL.").Default("true").BoolVar(&rc.walCompression)

cmd.Flag("tsdb.no-lockfile", "Do not create lockfile in TSDB data directory. In any case, the lockfiles will be deleted on next startup.").Default("false").BoolVar(&rc.noLockFile)
63 changes: 38 additions & 25 deletions cmd/thanos/sidecar.go
@@ -5,7 +5,9 @@ package main

import (
"context"
"fmt"
"math"
"net/http"
"net/url"
"sync"
"time"
@@ -62,18 +64,44 @@ func registerSidecar(app *extkingpin.App) {
return errors.Wrap(err, "error while parsing config for request logging")
}

httpConfContentYaml, err := conf.prometheus.httpClient.Content()
if err != nil {
return errors.Wrap(err, "getting http client config")
}
httpClientConfig, err := httpconfig.NewClientConfigFromYAML(httpConfContentYaml)
if err != nil {
return errors.Wrap(err, "parsing http config YAML")
}

httpClient, err := httpconfig.NewHTTPClient(*httpClientConfig, "thanos-sidecar")
if err != nil {
return errors.Wrap(err, "Improper http client config")
}

opts := reloader.Options{
HTTPClient: *httpClient,
CfgFile: conf.reloader.confFile,
CfgOutputFile: conf.reloader.envVarConfFile,
WatchedDirs: conf.reloader.ruleDirectories,
WatchInterval: conf.reloader.watchInterval,
RetryInterval: conf.reloader.retryInterval,
}

switch conf.reloader.method {
case HTTPReloadMethod:
opts.ReloadURL = reloader.ReloadURLFromBase(conf.prometheus.url)
case SignalReloadMethod:
opts.ProcessName = conf.reloader.processName
opts.RuntimeInfoURL = reloader.RuntimeInfoURLFromBase(conf.prometheus.url)
default:
return fmt.Errorf("invalid reload method: %s", conf.reloader.method)
}

rl := reloader.New(log.With(logger, "component", "reloader"),
extprom.WrapRegistererWithPrefix("thanos_sidecar_", reg),
&reloader.Options{
ReloadURL: reloader.ReloadURLFromBase(conf.prometheus.url),
CfgFile: conf.reloader.confFile,
CfgOutputFile: conf.reloader.envVarConfFile,
WatchedDirs: conf.reloader.ruleDirectories,
WatchInterval: conf.reloader.watchInterval,
RetryInterval: conf.reloader.retryInterval,
})
&opts)

return runSidecar(g, logger, reg, tracer, rl, component.Sidecar, *conf, grpcLogOpts, tagOpts)
return runSidecar(g, logger, reg, tracer, rl, component.Sidecar, *conf, httpClient, grpcLogOpts, tagOpts)
})
}
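
For comparison with the `signal` path above, here is a hypothetical sketch of what the default `http` reload method boils down to: POSTing to Prometheus' `/-/reload` endpoint, which Prometheus serves only when started with `--web.enable-lifecycle`. The `reloadOverHTTP` helper is an assumption for illustration, not the reloader package's actual code.

```go
// Hypothetical sketch of an HTTP-based reload: POST to Prometheus' /-/reload
// endpoint (requires Prometheus to run with --web.enable-lifecycle).
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// reloadOverHTTP asks the Prometheus instance at base to reload its config.
func reloadOverHTTP(base string) error {
	u, err := url.JoinPath(base, "/-/reload")
	if err != nil {
		return err
	}
	resp, err := http.Post(u, "", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("reload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// Assumes a local Prometheus with the lifecycle API enabled.
	if err := reloadOverHTTP("http://localhost:9090"); err != nil {
		fmt.Println(err)
	}
}
```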

@@ -85,28 +113,13 @@ func runSidecar(
reloader *reloader.Reloader,
comp component.Component,
conf sidecarConfig,
httpClient *http.Client,
grpcLogOpts []grpc_logging.Option,
tagOpts []tags.Option,
) error {
httpConfContentYaml, err := conf.prometheus.httpClient.Content()
if err != nil {
return errors.Wrap(err, "getting http client config")
}
httpClientConfig, err := httpconfig.NewClientConfigFromYAML(httpConfContentYaml)
if err != nil {
return errors.Wrap(err, "parsing http config YAML")
}

httpClient, err := httpconfig.NewHTTPClient(*httpClientConfig, "thanos-sidecar")
if err != nil {
return errors.Wrap(err, "Improper http client config")
}

reloader.SetHttpClient(*httpClient)

var m = &promMetadata{
promURL: conf.prometheus.url,

// Start out with the full time range. The shipper will constrain it later.
// TODO(fabxc): minimum timestamp is never adjusted if shipping is disabled.
mint: conf.limitMinTime.PrometheusTimestamp(),
3 changes: 2 additions & 1 deletion cmd/thanos/store.go
@@ -339,7 +339,8 @@ func runStore(
}

ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(logger, insBkt, time.Duration(conf.ignoreDeletionMarksDelay), conf.blockMetaFetchConcurrency)
metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg),
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
metaFetcher, err := block.NewMetaFetcher(logger, conf.blockMetaFetchConcurrency, insBkt, baseBlockIDsFetcher, dataDir, extprom.WrapRegistererWithPrefix("thanos_", reg),
[]block.MetadataFilter{
block.NewTimePartitionMetaFilter(conf.filterConf.MinTime, conf.filterConf.MaxTime),
block.NewLabelShardedMetaFilter(relabelConfig),
18 changes: 12 additions & 6 deletions cmd/thanos/tools_bucket.go
@@ -350,7 +350,8 @@ func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.Path

// We ignore any block that has the deletion marker file.
filters := []block.MetadataFilter{block.NewIgnoreDeletionMarkFilter(logger, insBkt, 0, block.FetcherConcurrency)}
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), filters)
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), filters)
if err != nil {
return err
}
@@ -407,7 +408,8 @@ func registerBucketLs(app extkingpin.AppClause, objStoreConfig *extflag.PathOrCo
ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(logger, insBkt, 0, block.FetcherConcurrency)
filters = append(filters, ignoreDeletionMarkFilter)
}
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), filters)
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), filters)
if err != nil {
return err
}
@@ -508,7 +510,8 @@ func registerBucketInspect(app extkingpin.AppClause, objStoreConfig *extflag.Pat
}
insBkt := objstoretracing.WrapWithTraces(objstore.WrapWithMetrics(bkt, extprom.WrapRegistererWithPrefix("thanos_", reg), bkt.Name()))

fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), nil)
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), nil)
if err != nil {
return err
}
@@ -651,7 +654,8 @@ func registerBucketWeb(app extkingpin.AppClause, objStoreConfig *extflag.PathOrC
return err
}
// TODO(bwplotka): Allow Bucket UI to visualize the state of block as well.
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg),
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg),
[]block.MetadataFilter{
block.NewTimePartitionMetaFilter(filterConf.MinTime, filterConf.MaxTime),
block.NewLabelShardedMetaFilter(relabelConfig),
@@ -829,7 +833,8 @@ func registerBucketCleanup(app extkingpin.AppClause, objStoreConfig *extflag.Pat

var sy *compact.Syncer
{
baseMetaFetcher, err := block.NewBaseFetcher(logger, tbc.blockSyncConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg))
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
baseMetaFetcher, err := block.NewBaseFetcher(logger, tbc.blockSyncConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg))
if err != nil {
return errors.Wrap(err, "create meta fetcher")
}
@@ -1371,7 +1376,8 @@ func registerBucketRetention(app extkingpin.AppClause, objStoreConfig *extflag.P

var sy *compact.Syncer
{
baseMetaFetcher, err := block.NewBaseFetcher(logger, tbc.blockSyncConcurrency, insBkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg))
baseBlockIDsFetcher := block.NewBaseBlockIDsFetcher(logger, insBkt)
baseMetaFetcher, err := block.NewBaseFetcher(logger, tbc.blockSyncConcurrency, insBkt, baseBlockIDsFetcher, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg))
if err != nil {
return errors.Wrap(err, "create meta fetcher")
}
5 changes: 5 additions & 0 deletions docs/components/receive.md
@@ -425,6 +425,11 @@ Flags:
ingesting a new exemplar will evict the oldest
exemplar from storage. 0 (or less) value of
this flag disables exemplars storage.
--tsdb.max-retention-bytes=0
Maximum number of bytes that can be stored for
blocks. A unit is required, supported units: B,
KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on
powers-of-2, so 1KB is 1024B.
--tsdb.no-lockfile Do not create lockfile in TSDB data directory.
In any case, the lockfiles will be deleted on
next startup.