11 const (
..
36 // DefaultMaxPointsPerBlock is the maximum number of points in an encoded
37 // block in a TSM file
38 DefaultMaxPointsPerBlock = 1000
..
42 )
43
44 // Config holds the configuration for the tsdb package.
45 type Config struct {
..
61 MaxPointsPerBlock int `toml:"max-points-per-block"`
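
For reference, the intended relationship between the constant and the setting is a simple fallback: the constant should only matter when the user leaves max-points-per-block unset. A minimal, self-contained sketch of that intent, assuming nothing beyond the two declarations quoted above (effectiveMaxPointsPerBlock is a hypothetical helper, not a function in the codebase):

```go
package main

import "fmt"

// DefaultMaxPointsPerBlock mirrors the constant quoted above.
const DefaultMaxPointsPerBlock = 1000

// Config mirrors the relevant field of the tsdb Config shown above.
type Config struct {
	MaxPointsPerBlock int `toml:"max-points-per-block"`
}

// effectiveMaxPointsPerBlock is a hypothetical helper: it returns the
// configured value when set, and falls back to the default otherwise.
func effectiveMaxPointsPerBlock(cfg Config) int {
	if cfg.MaxPointsPerBlock <= 0 {
		return DefaultMaxPointsPerBlock
	}
	return cfg.MaxPointsPerBlock
}

func main() {
	fmt.Println(effectiveMaxPointsPerBlock(Config{}))                        // 1000
	fmt.Println(effectiveMaxPointsPerBlock(Config{MaxPointsPerBlock: 4096})) // 4096
}
```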
278 // Plan returns a set of TSM files to rewrite for level 4 or higher. The planning returns
279 // multiple groups if possible to allow compactions to run concurrently.
280 func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
281 generations := c.findGenerations()
282
283 // first check if we should be doing a full compaction because nothing has been written in a long time
284 if c.CompactFullWriteColdDuration > 0 && time.Now().Sub(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 {
285 var tsmFiles []string
286 for i, group := range generations {
287 var skip bool
288
289 // Skip the file if it's over the max size and contains a full block and it does not have any tombstones
290 if group.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
291 skip = true
292 }
...
357 // Skip the file if it's over the max size and contains a full block or the generation is split
358 // over multiple files. In the latter case, that would mean the data in the file spilled over
359 // the 2GB limit.
360 if g.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock || g.count() > 1 {
361 start = i + 1
362 }
...
403 // Skip the file if it's over the max size and it contains a full block
404 if gen.size() >= uint64(maxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
405 startIndex++
406 continue
407 }
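
All three planner checks above compare BlockCount against the constant, so a file written with a larger configured block size never matches it and is never treated as full. One hedged sketch of a fix, assuming the planner gains access to the configured value (the field, helper, and stand-in types here are hypothetical, not the actual patch):

```go
package main

import "fmt"

const DefaultMaxPointsPerBlock = 1000

// blockCounter abstracts the FileStore.BlockCount call used in the excerpts above.
type blockCounter interface {
	BlockCount(path string, idx int) int
}

// planner is a stand-in for DefaultPlanner; the MaxPointsPerBlock field is an
// assumption about how a fix could be shaped.
type planner struct {
	fs                blockCounter
	MaxPointsPerBlock int // would be populated from Config.MaxPointsPerBlock
}

// hasFullBlock centralizes the "is this file's first block full?" test so all
// three planner call sites compare against the configured value, with the
// constant kept only as a fallback.
func (p *planner) hasFullBlock(path string) bool {
	max := p.MaxPointsPerBlock
	if max <= 0 {
		max = DefaultMaxPointsPerBlock
	}
	return p.fs.BlockCount(path, 1) == max
}

// fakeStore lets the sketch run without real TSM files.
type fakeStore struct{ count int }

func (f fakeStore) BlockCount(path string, idx int) int { return f.count }

func main() {
	p := planner{fs: fakeStore{count: 4096}, MaxPointsPerBlock: 4096}
	fmt.Println(p.hasFullBlock("000000001-000000004.tsm")) // true with the configured value
}
```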
520 // WriteSnapshot will write a Cache snapshot to one or more new TSM files.
521 func (c *Compactor) WriteSnapshot(cache *Cache) ([]string, error) {
522 c.mu.RLock()
523 opened := c.opened
524 c.mu.RUnlock()
525
526 if !opened {
527 return nil, errSnapshotsDisabled
528 }
529
530 iter := NewCacheKeyIterator(cache, tsdb.DefaultMaxPointsPerBlock)
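
Here the iterator's block size is fixed at the constant. Since Compactor already carries a Size that is only defaulted when unset (see the compact excerpt just below), a sketch of applying the same fallback in WriteSnapshot might look like this; it mirrors the quoted code and is an illustration, not the shipped fix:

```go
// Sketch only: mirror compact()'s existing fallback when building the
// snapshot iterator, so a configured Compactor.Size is honored here too.
size := c.Size
if size <= 0 {
	size = tsdb.DefaultMaxPointsPerBlock
}
iter := NewCacheKeyIterator(cache, size)
```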
545 // compact will write multiple smaller TSM files into one or more larger files
546 func (c *Compactor) compact(fast bool, tsmFiles []string) ([]string, error) {
547 size := c.Size
548 if size <= 0 {
549 size = tsdb.DefaultMaxPointsPerBlock
550 }
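
Notably, compact already has the right fallback shape; presumably nothing populates Size from the configuration, which would explain why this spot is listed as affected. Under that assumption, the missing wiring could be as small as the following fragment (the surrounding engine-startup context is illustrative; only the Size field is confirmed by the excerpt above):

```go
// Hypothetical engine-startup wiring: hand the configured value to the
// Compactor; a zero value still falls back to the default inside compact().
compactor := &Compactor{
	Size: cfg.MaxPointsPerBlock,
}
```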
Originally posted on the mailing list.

When the TSM1 compaction process was updated in commit 756421ec4a5aeea994603175e73941e461c2b69a, the implementation used the default max points per block constant rather than the configured value for users who specify it. There are five places affected in HEAD on master, shown in the excerpts above.