Add support for caching bloomfilters #1204

Merged 6 commits on Jan 27, 2020
2 changes: 1 addition & 1 deletion table/builder_test.go
@@ -57,7 +57,7 @@ func TestTableIndex(t *testing.T) {
keysCount := 10000
for _, opt := range opts {
builder := NewTableBuilder(opt)
filename := fmt.Sprintf("%s%c%d.sst", os.TempDir(), os.PathSeparator, rand.Int63())
filename := fmt.Sprintf("%s%c%d.sst", os.TempDir(), os.PathSeparator, rand.Uint32())
f, err := y.OpenSyncedFile(filename, true)
require.NoError(t, err)

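A likely motivation for the rand.Uint32 change (an inference, not stated in the diff): the table ID is parsed from this filename when the test opens the file, and the new cache keys in table.go assert that the ID fits in 32 bits. A minimal standalone sketch of that constraint, reusing the filename pattern from the test above:

package main

import (
	"fmt"
	"math"
	"math/rand"
	"os"
)

func main() {
	// Hypothetical sketch: an ID drawn from rand.Uint32 always fits the 32-bit
	// bound that bfCacheKey and blockCacheKey assert, while rand.Int63 may not.
	id := uint64(rand.Uint32())
	filename := fmt.Sprintf("%s%c%d.sst", os.TempDir(), os.PathSeparator, id)
	fmt.Println(filename, id <= math.MaxUint32) // prints the path and "true"
}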
96 changes: 83 additions & 13 deletions table/table.go
@@ -18,6 +18,7 @@ package table

import (
"crypto/aes"
"encoding/binary"
"fmt"
"io"
"math"
@@ -81,7 +82,7 @@ type TableInterface interface {
DoesNotHave(hash uint64) bool
}

// Table represents a loaded table file with the info we have about it
// Table represents a loaded table file with the info we have about it.
type Table struct {
sync.Mutex

@@ -97,10 +98,11 @@ type Table struct {
smallest, biggest []byte // Smallest and largest keys (with timestamps).
id uint64 // file id, part of filename

bf *z.Bloom
Checksum []byte
// Stores the total size of key-values stored in this table (including the size on vlog).
estimatedSize uint64
indexStart int
indexLen int

IsInmemory bool // Set to true if the table is on level 0 and opened in memory.
opt *Options
@@ -146,6 +148,13 @@ func (t *Table) DecrRef() error {
if err := os.Remove(filename); err != nil {
return err
}
// Delete all blocks from the cache.
for i := range t.blockIndex {
t.opt.Cache.Del(t.blockCacheKey(i))
}
// Delete bloom filter from the cache.
t.opt.Cache.Del(t.bfCacheKey())

}
return nil
}
@@ -336,10 +345,12 @@ func (t *Table) readIndex() error {
// Read index size from the footer.
readPos -= 4
buf = t.readNoFail(readPos, 4)
indexLen := int(y.BytesToU32(buf))
t.indexLen = int(y.BytesToU32(buf))

// Read index.
readPos -= indexLen
data := t.readNoFail(readPos, indexLen)
readPos -= t.indexLen
t.indexStart = readPos
data := t.readNoFail(readPos, t.indexLen)

if err := y.VerifyChecksum(data, expectedChk); err != nil {
return y.Wrapf(err, "failed to verify checksum for table: %s", t.Filename())
@@ -358,11 +369,18 @@ func (t *Table) readIndex() error {
y.Check(err)

t.estimatedSize = index.EstimatedSize
if t.bf, err = z.JSONUnmarshal(index.BloomFilter); err != nil {
return y.Wrapf(err, "failed to unmarshal bloom filter for the table %d in Table.readIndex",
t.id)
}
t.blockIndex = index.Offsets

// Avoid the cost of unmarshalling the bloom filters if the cache is absent.
if t.opt.Cache != nil {
var bf *z.Bloom
if bf, err = z.JSONUnmarshal(index.BloomFilter); err != nil {
return y.Wrapf(err, "failed to unmarshal bloom filter for the table %d in Table.readIndex",
t.id)
}

t.opt.Cache.Set(t.bfCacheKey(), bf, int64(len(index.BloomFilter)))
}
return nil
}
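// Aside (illustration, not part of this diff): the tail layout of an .sst file
// as implied by the reads in readIndex above. Recording indexStart and indexLen
// here is what lets readBloomFilter, added further down, re-read just the index
// on a cache miss instead of keeping the unmarshalled bloom filter in memory.
//
//   ... data blocks ...
//   table index (protobuf TableIndex, indexLen bytes)   <- indexStart
//   index length (4 bytes)
//   checksum section (read by the code elided above)    <- end of file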

@@ -443,10 +461,25 @@ func (t *Table) block(idx int) (*block, error) {
return blk, nil
}

func (t *Table) blockCacheKey(idx int) uint64 {
y.AssertTrue(t.ID() < math.MaxUint32)
// bfCacheKey returns the cache key for the bloom filter.
func (t *Table) bfCacheKey() []byte {
y.AssertTrue(t.id < math.MaxUint32)
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(t.id))

// Without the "bf" prefix, we would have a conflict with blockCacheKey.
return append([]byte("bf"), buf...)
}

func (t *Table) blockCacheKey(idx int) []byte {
y.AssertTrue(t.id < math.MaxUint32)
y.AssertTrue(uint32(idx) < math.MaxUint32)
return (t.ID() << 32) | uint64(idx)

buf := make([]byte, 8)
// Assume t.ID does not overflow uint32.
binary.BigEndian.PutUint32(buf[:4], uint32(t.ID()))
binary.BigEndian.PutUint32(buf[4:], uint32(idx))
return buf
}
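// Aside (illustration, not part of this diff): concrete key bytes for a table
// with id=7 and block index 3, as produced by the two helpers above.
//
//   bfCacheKey():     62 66 00 00 00 07           ("bf" + 4-byte big-endian id, 6 bytes)
//   blockCacheKey(3): 00 00 00 07 00 00 00 03     (4-byte id + 4-byte block index, 8 bytes)
//
// The "bf" prefix (and the differing key lengths) keep a table's bloom-filter
// entry from colliding with its block entries in the shared cache.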

// EstimatedSize returns the total size of key-values stored in this table (including the
@@ -470,7 +503,44 @@ func (t *Table) ID() uint64 { return t.id }

// DoesNotHave returns true if (but not "only if") the table does not have the key hash.
// It does a bloom filter lookup.
func (t *Table) DoesNotHave(hash uint64) bool { return !t.bf.Has(hash) }
func (t *Table) DoesNotHave(hash uint64) bool {
var bf *z.Bloom

// Return fast if cache is absent.
if t.opt.Cache == nil {
bf, _ := t.readBloomFilter()
return !bf.Has(hash)
}

// Check if the bloomfilter exists in the cache.
if b, ok := t.opt.Cache.Get(t.bfCacheKey()); b != nil && ok {
bf = b.(*z.Bloom)
return !bf.Has(hash)
}

bf, sz := t.readBloomFilter()
t.opt.Cache.Set(t.bfCacheKey(), bf, int64(sz))
return !bf.Has(hash)
}

// readBloomFilter reads the bloom filter from the SST and returns it along
// with its serialized length.
func (t *Table) readBloomFilter() (*z.Bloom, int) {
// Read bloom filter from the SST.
data := t.readNoFail(t.indexStart, t.indexLen)
index := pb.TableIndex{}
var err error
// Decrypt the table index if it is encrypted.
if t.shouldDecrypt() {
data, err = t.decrypt(data)
y.Check(err)
}
y.Check(proto.Unmarshal(data, &index))

bf, err := z.JSONUnmarshal(index.BloomFilter)
y.Check(err)
return bf, len(index.BloomFilter)
}

// VerifyChecksum verifies checksum for all blocks of table. This function is called by
// OpenTable() function. This function is also called inside levelsController.VerifyChecksum().
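For context, a hedged sketch of how a read path can use the new DoesNotHave: the caller, the helper name mayContain, and the import path are assumptions for illustration, not code from this PR. With this change, each call first checks the cache for the table's unmarshalled bloom filter and falls back to readBloomFilter on a miss.

package lookup

// The import path is assumed for badger v2 at the time of this PR.
import "github.com/dgraph-io/badger/v2/table"

// mayContain filters candidate tables using their bloom filters; tables whose
// filter proves the key hash absent are skipped without touching their blocks.
func mayContain(tables []*table.Table, hash uint64) []*table.Table {
	var out []*table.Table
	for _, t := range tables {
		if t.DoesNotHave(hash) {
			continue // bloom filter proves the key hash is absent from this table
		}
		out = append(out, t)
	}
	return out
}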
8 changes: 2 additions & 6 deletions table/table_test.go
@@ -77,13 +77,9 @@ func buildTable(t *testing.T, keyValues [][]string, opts Options) *os.File {
defer b.Close()
// TODO: Add test for file garbage collection here. No files should be left after the tests here.

filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Int63())
filename := fmt.Sprintf("%s%s%d.sst", os.TempDir(), string(os.PathSeparator), rand.Uint32())
f, err := y.CreateSyncedFile(filename, true)
if t != nil {
require.NoError(t, err)
} else {
y.Check(err)
}
require.NoError(t, err)

sort.Slice(keyValues, func(i, j int) bool {
return keyValues[i][0] < keyValues[j][0]