diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index c09e5463..4791c554 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -2,25 +2,27 @@ name: Go on: push: - branches: [ main ] + branches: + - main + - lint pull_request: - branches: [ main ] + branches: + - main + - lint jobs: - ubuntu-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4.1.4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5.0.0 with: go-version: 1.19 - - name: Run Go Vet - run: | - go vet ./... + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v5.0.0 - name: Run Go Fmt run: | @@ -36,20 +38,19 @@ jobs: - name: Run Unit Test run: go test -count 1 -v ./... - + windows-test: runs-on: windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4.1.4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5.0.0 with: go-version: 1.19 - - name: Run Go Vet - run: | - go vet ./... + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v5.0.0 - name: Build run: go build -v diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..d77247e2 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,327 @@ +# This code is licensed under the terms of the MIT license https://opensource.org/license/mit +# Copyright (c) 2021 Marat Reymers + +## Golden config for golangci-lint v1.57.2 +# +# This is the best config for golangci-lint based on my experience and opinion. +# It is very strict, but not extremely strict. +# Feel free to adapt and change it for your needs. + +run: + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 3m + +# This file contains only configs which differ from defaults. +# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters-settings: + cyclop: + # The maximal code complexity to report. 
+ # Default: 10 + max-complexity: 30 + # The maximal average package complexity. + # If it's higher than 0.0 (float) the check is enabled + # Default: 0.0 + package-average: 10.0 + + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map + + exhaustruct: + # List of regular expressions to exclude struct packages and their names from checks. + # Regular expressions must match complete canonical struct package/name/structname. + # Default: [] + exclude: + # std libs + - "^net/http.Client$" + - "^net/http.Cookie$" + - "^net/http.Request$" + - "^net/http.Response$" + - "^net/http.Server$" + - "^net/http.Transport$" + - "^net/url.URL$" + - "^os/exec.Cmd$" + - "^reflect.StructField$" + # public libs + - "^github.com/Shopify/sarama.Config$" + - "^github.com/Shopify/sarama.ProducerMessage$" + - "^github.com/mitchellh/mapstructure.DecoderConfig$" + - "^github.com/prometheus/client_golang/.+Opts$" + - "^github.com/spf13/cobra.Command$" + - "^github.com/spf13/cobra.CompletionOptions$" + - "^github.com/stretchr/testify/mock.Mock$" + - "^github.com/testcontainers/testcontainers-go.+Request$" + - "^github.com/testcontainers/testcontainers-go.FromDockerfile$" + - "^golang.org/x/tools/go/analysis.Analyzer$" + - "^google.golang.org/protobuf/.+Options$" + - "^gopkg.in/yaml.v3.Node$" + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 100 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 50 + # Ignore comments when counting lines. + # Default false + ignore-comments: true + + gocognit: + # Minimal code complexity to report. 
+ # Default: 30 (but we recommend 10-20) + min-complexity: 30 + + gocritic: + # Settings passed to gocritic. + # The settings key is the name of a supported gocritic checker. + # The list of supported checkers can be find in https://go-critic.github.io/overview. + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + + gomnd: + # List of function patterns to exclude from analysis. + # Values always ignored: `time.Date`, + # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`, + # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`. + # Default: [] + ignored-functions: + - flag.Arg + - flag.Duration.* + - flag.Float.* + - flag.Int.* + - flag.Uint.* + - os.Chmod + - os.Mkdir.* + - os.OpenFile + - os.WriteFile + - prometheus.ExponentialBuckets.* + - prometheus.LinearBuckets + + gomodguard: + blocked: + # List of blocked modules. + # Default: [] + modules: + - github.com/golang/protobuf: + recommendations: + - google.golang.org/protobuf + reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" + - github.com/satori/go.uuid: + recommendations: + - github.com/google/uuid + reason: "satori's package is not maintained" + - github.com/gofrs/uuid: + recommendations: + - github.com/gofrs/uuid/v5 + reason: "gofrs' package was not go module before v5" + + govet: + # Enable all analyzers. + # Default: false + enable-all: true + # Disable analyzers by name. + # Run `go tool vet help` to see all analyzers. + # Default: [] + disable: + - fieldalignment # too strict + # Settings per analyzer. + settings: + shadow: + # Whether to be strict about shadowing; can be noisy. + # Default: false + strict: true + + inamedparam: + # Skips check for interface methods with only a single parameter. 
+ # Default: false + skip-single-param: true + + nakedret: + # Make an issue if func has more lines of code than this setting, and it has naked returns. + # Default: 30 + max-func-lines: 0 + + nolintlint: + # Exclude following linters from requiring an explanation. + # Default: [] + allow-no-explanation: [funlen, gocognit, lll] + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + + perfsprint: + # Optimizes into strings concatenation. + # Default: true + strconcat: false + + rowserrcheck: + # database/sql is always checked + # Default: [] + packages: + - github.com/jmoiron/sqlx + + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. 
+ # Default: false + all: true + +linters: + disable-all: true + enable: + ## enabled by default + - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases + - gosimple # specializes in simplifying a code + - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - staticcheck # is a go vet on steroids, applying a ton of static analysis checks + - typecheck # like the front-end of a Go compiler, parses and type-checks Go code + - unused # checks for unused constants, variables, functions and types + ## disabled by default + - asasalint # checks for pass []any as any in variadic func(...any) + - asciicheck # checks that your code does not contain non-ASCII identifiers + - bidichk # checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - copyloopvar # detects places where loop variables are copied + - cyclop # checks function and package cyclomatic complexity + - dupl # tool for code clone detection + - durationcheck # checks for two durations multiplied together + - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error + - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 + - execinquery # checks query string in Query function which reads your Go src files and warning it finds + - exhaustive # checks exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - forbidigo # forbids identifiers + - funlen # tool for detection of long functions + - gocheckcompilerdirectives # validates go compiler directive comments (//go:) + # - gochecknoglobals # checks that no global variables exist + - gochecknoinits # checks that no init functions are present in Go code + - 
gochecksumtype # checks exhaustiveness on Go "sum types" + - gocognit # computes and checks the cognitive complexity of functions + - goconst # finds repeated strings that could be replaced by a constant + - gocritic # provides diagnostics that check for bugs, performance and style issues + - gocyclo # computes and checks the cyclomatic complexity of functions + - godot # checks if comments end in a period + # - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt + - gomnd # detects magic numbers + - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod + - gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations + - goprintffuncname # checks that printf-like functions are named with f at the end + - gosec # inspects source code for security problems + - intrange # finds places where for loops could make use of an integer range + - lll # reports long lines + - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap) + - makezero # finds slice declarations with non-zero initial length + - mirror # reports wrong mirror patterns of bytes/strings usage + - musttag # enforces field tags in (un)marshaled structs + - nakedret # finds naked returns in functions greater than a specified function length + - nestif # reports deeply nested if statements + - nilerr # finds the code that returns nil even if it checks that the error is not nil + - nilnil # checks that there is no simultaneous return of nil error and an invalid value + - noctx # finds sending http request without context.Context + - nolintlint # reports ill-formed or insufficient nolint directives + - nonamedreturns # reports all named returns + - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL + - 
perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative + - predeclared # finds code that shadows one of Go's predeclared identifiers + - promlinter # checks Prometheus metrics naming via promlint + - protogetter # reports direct reads from proto message fields when getters should be used + - reassign # checks that package variables are not reassigned + - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint + - rowserrcheck # checks whether Err of rows is checked successfully + - sloglint # ensure consistent code style when using log/slog + - spancheck # checks for mistakes with OpenTelemetry/Census spans + - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed + - stylecheck # is a replacement for golint + - tenv # detects using os.Setenv instead of t.Setenv since Go1.17 + - testableexamples # checks if examples are testable (have an expected output) + - testifylint # checks usage of github.com/stretchr/testify + # - testpackage # makes you use a separate _test package + - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes + - unconvert # removes unnecessary type conversions + - unparam # reports unused function parameters + - usestdlibvars # detects the possibility to use variables/constants from the Go standard library + - wastedassign # finds wasted assignment statements + - whitespace # detects leading and trailing whitespace + + ## you may want to enable + #- decorder # checks declaration order and count of types, constants, variables and functions + #- exhaustruct # [highly recommend to enable] checks if all structure fields are initialized + #- gci # controls golang package import order and makes it always deterministic + #- ginkgolinter # [if you use ginkgo/gomega] enforces standards of using ginkgo and gomega + #- godox # detects FIXME, TODO and other comment keywords + #- goheader # checks is file header matches to pattern + #- 
inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters + #- interfacebloat # checks the number of methods inside an interface + #- ireturn # accept interfaces, return concrete types + #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated + #- tagalign # checks that struct tags are well aligned + #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope + #- wrapcheck # checks that errors returned from external packages are wrapped + #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event + + ## disabled + #- containedctx # detects struct contained context.Context field + #- contextcheck # [too many false positives] checks the function whether use a non-inherited context + #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages + #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + #- dupword # [useless without config] checks for duplicate words in the source code + #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. 
Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted + #- forcetypeassert # [replaced by errcheck] finds forced type assertions + #- goerr113 # [too strict] checks the errors handling expressions + #- gofmt # [replaced by goimports] checks whether code was gofmt-ed + #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed + #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase + #- grouper # analyzes expression groups + #- importas # enforces consistent import aliases + #- maintidx # measures the maintainability index of each function + #- misspell # [useless] finds commonly misspelled English words in comments + #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity + #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test + #- tagliatelle # checks the struct tags + #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers + #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 50 + + exclude-rules: + - source: "(noinspection|TODO)" + linters: [godot] + - source: "//noinspection" + linters: [gocritic] + - path: "_test\\.go" + linters: + - bodyclose + - dupl + - funlen + - goconst + - gosec + - noctx + - wrapcheck diff --git a/batch.go b/batch.go index 4abd647e..63332569 100644 --- a/batch.go +++ b/batch.go @@ -25,7 +25,7 @@ type Batch struct { options BatchOptions mu sync.RWMutex committed bool - batchId *snowflake.Node + batchID *snowflake.Node } // NewBatch creates a new Batch instance. 
@@ -41,7 +41,7 @@ func (db *DB) NewBatch(options BatchOptions) *Batch { if err != nil { panic(fmt.Sprintf("snowflake.NewNode(1) failed: %v", err)) } - batch.batchId = node + batch.batchID = node } batch.lock() return batch @@ -54,7 +54,7 @@ func makeBatch() interface{} { } return &Batch{ options: DefaultBatchOptions, - batchId: node, + batchID: node, } } @@ -67,9 +67,8 @@ func (b *Batch) init(rdonly, sync bool, disableWal bool, db *DB) *Batch { return b } -func (b *Batch) withPendingWrites() *Batch { +func (b *Batch) withPendingWrites() { b.pendingWrites = make(map[string]*LogRecord) - return b } func (b *Batch) reset() { @@ -277,9 +276,9 @@ func (b *Batch) Commit() error { if err := b.db.waitMemtableSpace(); err != nil { return err } - batchId := b.batchId.Generate() + batchID := b.batchID.Generate() // call memtable put batch - err := b.db.activeMem.putBatch(b.pendingWrites, batchId, b.options.WriteOptions) + err := b.db.activeMem.putBatch(b.pendingWrites, batchID, b.options.WriteOptions) if err != nil { return err } diff --git a/benchmark/bench_test.go b/benchmark/bench_test.go index 9980f743..0af153a9 100644 --- a/benchmark/bench_test.go +++ b/benchmark/bench_test.go @@ -1,6 +1,7 @@ package benchmark import ( + "errors" "os" "testing" @@ -35,6 +36,7 @@ func BenchmarkPut(b *testing.B) { for i := 0; i < b.N; i++ { err := db.Put(util.GetTestKey(i), util.RandomValue(1024)) + //nolint:testifylint // benchmark assert.Nil(b, err) } } @@ -44,6 +46,7 @@ func BenchmarkGet(b *testing.B) { defer destroy() for i := 0; i < 1000000; i++ { err := db.Put(util.GetTestKey(i), util.RandomValue(128)) + //nolint:testifylint // benchmark assert.Nil(b, err) } b.ReportAllocs() @@ -53,7 +56,7 @@ func BenchmarkGet(b *testing.B) { val, err := db.Get(util.GetTestKey(i)) if err == nil { assert.NotNil(b, val) - } else if err != lotusdb.ErrKeyNotFound { + } else if !errors.Is(err, lotusdb.ErrKeyNotFound) { b.Error(err) } } diff --git a/bptree.go b/bptree.go index 5c19a7c4..687ab33d 100644 ---
a/bptree.go +++ b/bptree.go @@ -3,7 +3,9 @@ package lotusdb import ( "bytes" "context" + "errors" "fmt" + "os" "path/filepath" "github.com/rosedblabs/diskhash" @@ -12,7 +14,12 @@ import ( "golang.org/x/sync/errgroup" ) -// bucket name for bolt db to store index data +const ( + defaultFileMode os.FileMode = 0600 + defaultInitialMmapSize int = 1024 +) + +// bucket name for bolt db to store index data. var indexBucketName = []byte("lotusdb-index") // BPTree is the BoltDB index implementation. @@ -31,10 +38,10 @@ func openBTreeIndex(options indexOptions, _ ...diskhash.MatchKeyFunc) (*BPTree, // open bolt db tree, err := bbolt.Open( filepath.Join(options.dirPath, fmt.Sprintf(indexFileExt, i)), - 0600, + defaultFileMode, &bbolt.Options{ NoSync: true, - InitialMmapSize: 1024, + InitialMmapSize: defaultInitialMmapSize, FreelistType: bbolt.FreelistMapType, }, ) @@ -47,10 +54,10 @@ func openBTreeIndex(options indexOptions, _ ...diskhash.MatchKeyFunc) (*BPTree, if err != nil { return nil, err } - if _, err := tx.CreateBucketIfNotExists(indexBucketName); err != nil { + if _, err = tx.CreateBucketIfNotExists(indexBucketName); err != nil { return nil, err } - if err := tx.Commit(); err != nil { + if err = tx.Commit(); err != nil { return nil, err } trees[i] = tree @@ -116,7 +123,7 @@ func (bt *BPTree) PutBatch(positions []*KeyPosition, _ ...diskhash.MatchKeyFunc) default: encPos := record.position.Encode() if err := bucket.Put(record.key, encPos); err != nil { - if err == bbolt.ErrKeyRequired { + if errors.Is(err, bbolt.ErrKeyRequired) { return ErrKeyIsEmpty } return err @@ -200,7 +207,7 @@ func (bt *BPTree) Sync() error { return nil } -// bptreeIterator implement baseIterator +// bptreeIterator implement baseIterator. type bptreeIterator struct { key []byte value []byte @@ -209,7 +216,7 @@ type bptreeIterator struct { options IteratorOptions } -// create a boltdb based btree iterator +// create a boltdb based btree iterator. 
func newBptreeIterator(tx *bbolt.Tx, options IteratorOptions) *bptreeIterator { return &bptreeIterator{ cursor: tx.Bucket(indexBucketName).Cursor(), diff --git a/bptree_test.go b/bptree_test.go index e25770ff..2480293f 100644 --- a/bptree_test.go +++ b/bptree_test.go @@ -10,6 +10,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/rosedblabs/wal" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -60,7 +61,7 @@ func Test_openIndexBoltDB(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := os.MkdirAll(tt.options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(tt.options.dirPath) }() @@ -74,7 +75,6 @@ func Test_openIndexBoltDB(t *testing.T) { assert.Equal(t, tt.want.options.partitionNum, got.options.partitionNum) assert.Equal(t, len(tt.want.trees), len(got.trees)) }) - } } @@ -95,13 +95,13 @@ func testbptreeGet(t *testing.T, partitionNum int) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keyPositions []*KeyPosition keyPositions = append(keyPositions, &KeyPosition{ key: []byte("exist"), @@ -109,7 +109,7 @@ func testbptreeGet(t *testing.T, partitionNum int) { position: &wal.ChunkPosition{}, }) err = bt.PutBatch(keyPositions) - assert.Nil(t, err) + require.NoError(t, err) tests := []struct { name string @@ -122,9 +122,10 @@ func testbptreeGet(t *testing.T, partitionNum int) { {"exist", []byte("exist"), true, false}, {"len(key)=0", []byte(""), false, true}, } + var got *KeyPosition for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := bt.Get(tt.key) + got, err = bt.Get(tt.key) if (err != nil) != tt.wantErr { t.Errorf("BPTree.Get() error = %v, wantErr %v", err, tt.wantErr) return @@ -151,13 +152,13 @@ func 
testbptreePutbatch(t *testing.T, partitionNum int) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keyPositions []*KeyPosition keyPositions = append(keyPositions, &KeyPosition{ @@ -187,7 +188,7 @@ func testbptreePutbatch(t *testing.T, partitionNum int) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := bt.PutBatch(tt.positions); (err != nil) != tt.wantErr { + if err = bt.PutBatch(tt.positions); (err != nil) != tt.wantErr { t.Errorf("BPTree.PutBatch() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -211,13 +212,13 @@ func testbptreeDeletebatch(t *testing.T, partitionNum int) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keys [][]byte keys = append(keys, nil, []byte("not-exist"), []byte("exist"), []byte("")) var keyPositions []*KeyPosition @@ -228,7 +229,7 @@ func testbptreeDeletebatch(t *testing.T, partitionNum int) { }) err = bt.PutBatch(keyPositions) - assert.Nil(t, err) + require.NoError(t, err) tests := []struct { name string @@ -242,7 +243,7 @@ func testbptreeDeletebatch(t *testing.T, partitionNum int) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := bt.DeleteBatch(tt.keys); (err != nil) != tt.wantErr { + if err = bt.DeleteBatch(tt.keys); (err != nil) != tt.wantErr { t.Errorf("BPTree.DeleteBatch() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -266,16 +267,16 @@ func testbptreeClose(t *testing.T, partitionNum int) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + 
require.NoError(t, err) err = bt.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestBPTree_Sync_1(t *testing.T) { @@ -295,14 +296,15 @@ func testbptreeSync(t *testing.T, partitionNum int) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + require.NoError(t, err) + assert.NotNil(t, bt) err = bt.Sync() - assert.Nil(t, err) + assert.NoError(t, err) } func Test_bptreeIterator(t *testing.T) { @@ -314,12 +316,12 @@ func Test_bptreeIterator(t *testing.T) { } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() bt, err := openBTreeIndex(options) - assert.Nil(t, err) + require.NoError(t, err) m := map[string]*wal.ChunkPosition{ "key 0": {SegmentId: 0, BlockNumber: 0, ChunkOffset: 0, ChunkSize: 0}, "key 1": {SegmentId: 1, BlockNumber: 1, ChunkOffset: 1, ChunkSize: 1}, @@ -345,7 +347,19 @@ func Test_bptreeIterator(t *testing.T) { }, ) - keyPositions2 = append(keyPositions, &KeyPosition{ + keyPositions2 = append(keyPositions2, &KeyPosition{ + key: []byte("key 0"), + partition: 0, + position: &wal.ChunkPosition{SegmentId: 0, BlockNumber: 0, ChunkOffset: 0, ChunkSize: 0}, + }, &KeyPosition{ + key: []byte("key 1"), + partition: 0, + position: &wal.ChunkPosition{SegmentId: 1, BlockNumber: 1, ChunkOffset: 1, ChunkSize: 1}, + }, &KeyPosition{ + key: []byte("key 2"), + partition: 0, + position: &wal.ChunkPosition{SegmentId: 2, BlockNumber: 2, ChunkOffset: 2, ChunkSize: 2}, + }, &KeyPosition{ key: []byte("abc 0"), partition: 0, position: &wal.ChunkPosition{SegmentId: 3, BlockNumber: 3, ChunkOffset: 3, ChunkSize: 3}, @@ -360,17 +374,17 @@ func Test_bptreeIterator(t *testing.T) { }) err = bt.PutBatch(keyPositions) - assert.Nil(t, err) + require.NoError(t, err) tree := bt.trees[0] tx, err := tree.Begin(true) - 
assert.Nil(t, err) + require.NoError(t, err) iteratorOptions := IteratorOptions{ Reverse: false, } itr := newBptreeIterator(tx, iteratorOptions) - assert.Nil(t, err) + require.NoError(t, err) var prev []byte itr.Rewind() for itr.Valid() { @@ -381,17 +395,17 @@ func Test_bptreeIterator(t *testing.T) { itr.Next() } err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) tx, err = tree.Begin(true) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions = IteratorOptions{ Reverse: true, } prev = nil itr = newBptreeIterator(tx, iteratorOptions) - assert.Nil(t, err) + require.NoError(t, err) itr.Rewind() for itr.Valid() { currKey := itr.Key() @@ -409,17 +423,17 @@ func Test_bptreeIterator(t *testing.T) { itr.Seek([]byte("aye 2")) assert.False(t, itr.Valid()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) tx, err = tree.Begin(true) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions = IteratorOptions{ Reverse: false, } prev = nil itr = newBptreeIterator(tx, iteratorOptions) - assert.Nil(t, err) + require.NoError(t, err) itr.Rewind() for itr.Valid() { currKey := itr.Key() @@ -437,35 +451,35 @@ func Test_bptreeIterator(t *testing.T) { itr.Seek([]byte("aye 2")) assert.Equal(t, []byte("key 0"), itr.Key()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) // prefix err = bt.PutBatch(keyPositions2) - assert.Nil(t, err) + require.NoError(t, err) tx, err = tree.Begin(true) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions = IteratorOptions{ Reverse: false, Prefix: []byte("not valid"), } itr = newBptreeIterator(tx, iteratorOptions) - assert.Nil(t, err) + require.NoError(t, err) itr.Rewind() assert.False(t, itr.Valid()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) tx, err = tree.Begin(true) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions = IteratorOptions{ Reverse: false, Prefix: []byte("abc"), } itr = newBptreeIterator(tx, iteratorOptions) - assert.Nil(t, err) + 
require.NoError(t, err) itr.Rewind() assert.True(t, itr.Valid()) @@ -475,6 +489,5 @@ func Test_bptreeIterator(t *testing.T) { itr.Next() } err = itr.Close() - assert.Nil(t, err) - + assert.NoError(t, err) } diff --git a/db.go b/db.go index ec99677e..85684ec5 100644 --- a/db.go +++ b/db.go @@ -2,6 +2,7 @@ package lotusdb import ( "context" + "errors" "fmt" "io" "log" @@ -65,7 +66,7 @@ func Open(options Options) (*DB, error) { // create data directory if not exist if _, err := os.Stat(options.DirPath); err != nil { - if err := os.MkdirAll(options.DirPath, os.ModePerm); err != nil { + if err = os.MkdirAll(options.DirPath, os.ModePerm); err != nil { return nil, err } } @@ -196,7 +197,7 @@ func (db *DB) Sync() error { return nil } -// Put put with defaultWriteOptions +// Put put with defaultWriteOptions. func (db *DB) Put(key []byte, value []byte) error { return db.PutWithOptions(key, value, DefaultWriteOptions) } @@ -205,7 +206,10 @@ func (db *DB) Put(key []byte, value []byte) error { // Actually, it will open a new batch and commit it. // You can think the batch has only one Put operation. func (db *DB) PutWithOptions(key []byte, value []byte, options WriteOptions) error { - batch := db.batchPool.Get().(*Batch) + batch, ok := db.batchPool.Get().(*Batch) + if !ok { + panic("batchPoll.Get failed") + } batch.options.WriteOptions = options defer func() { batch.reset() @@ -226,7 +230,10 @@ func (db *DB) PutWithOptions(key []byte, value []byte, options WriteOptions) err // Actually, it will open a new batch and commit it. // You can think the batch has only one Get operation. 
func (db *DB) Get(key []byte) ([]byte, error) { - batch := db.batchPool.Get().(*Batch) + batch, ok := db.batchPool.Get().(*Batch) + if !ok { + panic("batchPoll.Get failed") + } batch.init(true, false, true, db) defer func() { _ = batch.Commit() @@ -236,7 +243,7 @@ func (db *DB) Get(key []byte) ([]byte, error) { return batch.Get(key) } -// Delete delete with defaultWriteOptions +// Delete delete with defaultWriteOptions. func (db *DB) Delete(key []byte) error { return db.DeleteWithOptions(key, DefaultWriteOptions) } @@ -245,7 +252,10 @@ func (db *DB) Delete(key []byte) error { // Actually, it will open a new batch and commit it. // You can think the batch has only one Delete operation. func (db *DB) DeleteWithOptions(key []byte, options WriteOptions) error { - batch := db.batchPool.Get().(*Batch) + batch, ok := db.batchPool.Get().(*Batch) + if !ok { + panic("batchPoll.Get failed") + } batch.options.WriteOptions = options defer func() { batch.reset() @@ -266,7 +276,10 @@ func (db *DB) DeleteWithOptions(key []byte, options WriteOptions) error { // Actually, it will open a new batch and commit it. // You can think the batch has only one Exist operation. 
func (db *DB) Exist(key []byte) (bool, error) { - batch := db.batchPool.Get().(*Batch) + batch, ok := db.batchPool.Get().(*Batch) + if !ok { + panic("batchPoll.Get failed") + } batch.init(true, false, true, db) defer func() { _ = batch.Commit() @@ -282,16 +295,16 @@ func validateOptions(options *Options) error { return ErrDBDirectoryISEmpty } if options.MemtableSize <= 0 { - options.MemtableSize = 64 << 20 // 64MB + options.MemtableSize = DefaultOptions.MemtableSize } if options.MemtableNums <= 0 { - options.MemtableNums = 15 + options.MemtableNums = DefaultOptions.MemtableNums } if options.PartitionNum <= 0 { - options.PartitionNum = 5 + options.PartitionNum = DefaultOptions.PartitionNum } if options.ValueLogFileSize <= 0 { - options.ValueLogFileSize = 1 << 30 // 1GB + options.ValueLogFileSize = DefaultOptions.ValueLogFileSize } // assure ValueLogFileSize >= MemtableSize if options.ValueLogFileSize < int64(options.MemtableSize) { @@ -329,7 +342,7 @@ func (db *DB) waitMemtableSpace() error { case db.flushChan <- db.activeMem: db.immuMems = append(db.immuMems, db.activeMem) options := db.activeMem.options - options.tableId++ + options.tableID++ // open a new memtable for writing table, err := openMemtable(options) if err != nil { @@ -377,7 +390,7 @@ func (db *DB) flushMemtable(table *memtable) { } // sync the value log - if err := db.vlog.sync(); err != nil { + if err = db.vlog.sync(); err != nil { log.Println("vlog sync failed:", err) return } @@ -390,7 +403,7 @@ func (db *DB) flushMemtable(table *memtable) { putMatchKeys[i] = MatchKeyFunc(db, keyPos[i].key, nil, nil) } } - if err := db.index.PutBatch(keyPos, putMatchKeys...); err != nil { + if err = db.index.PutBatch(keyPos, putMatchKeys...); err != nil { log.Println("index PutBatch failed:", err) return } @@ -402,18 +415,18 @@ func (db *DB) flushMemtable(table *memtable) { deleteMatchKeys[i] = MatchKeyFunc(db, deletedKeys[i], nil, nil) } } - if err := db.index.DeleteBatch(deletedKeys, deleteMatchKeys...); err != 
nil { + if err = db.index.DeleteBatch(deletedKeys, deleteMatchKeys...); err != nil { log.Println("index DeleteBatch failed:", err) return } // sync the index - if err := db.index.Sync(); err != nil { + if err = db.index.Sync(); err != nil { log.Println("index sync failed:", err) return } // delete the wal - if err := table.deleteWAl(); err != nil { + if err = table.deleteWAl(); err != nil { log.Println("delete wal failed:", err) return } @@ -422,9 +435,9 @@ func (db *DB) flushMemtable(table *memtable) { db.mu.Lock() if table == db.activeMem { options := db.activeMem.options - options.tableId++ + options.tableID++ // open a new memtable for writing - table, err := openMemtable(options) + table, err = openMemtable(options) if err != nil { panic("flush activate memtable wrong") } @@ -460,6 +473,8 @@ func (db *DB) listenMemtableFlush() { // Compact will iterate all values in vlog, and write the valid values to a new vlog file. // Then replace the old vlog file with the new one, and delete the old one. 
+// +//nolint:gocognit func (db *DB) Compact() error { db.flushLock.Lock() defer db.flushLock.Unlock() @@ -494,7 +509,7 @@ func (db *DB) Compact() error { count++ chunk, pos, err := reader.Next() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } _ = newVlogFile.Delete() @@ -525,7 +540,7 @@ func (db *DB) Compact() error { } if count%db.vlog.options.compactBatchCount == 0 { - err := db.rewriteValidRecords(newVlogFile, validRecords, part) + err = db.rewriteValidRecords(newVlogFile, validRecords, part) if err != nil { _ = newVlogFile.Delete() return err diff --git a/db_test.go b/db_test.go index 57fc8cef..dbb55eeb 100644 --- a/db_test.go +++ b/db_test.go @@ -29,10 +29,11 @@ func TestDBOpen(t *testing.T) { t.Run("Valid options", func(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-open") - assert.Nil(t, err) + require.NoError(t, err) + assert.NotEqual(t, "", path) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) assert.NotNil(t, db, "DB should not be nil") @@ -44,7 +45,7 @@ func TestDBOpen(t *testing.T) { assert.NotNil(t, db.flushChan, "DB flushChan should not be nil") err = db.Close() - assert.Nil(t, err) + assert.NoError(t, err) }) t.Run("Invalid options - no directory path", func(t *testing.T) { options := DefaultOptions @@ -57,42 +58,43 @@ func TestDBOpen(t *testing.T) { func TestDBClose(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-close") - assert.Nil(t, err) + require.NoError(t, err) + assert.NotEqual(t, "", path) options.DirPath = path db, err := Open(options) defer destroyDB(db) - assert.Nil(t, err) + require.NoError(t, err) t.Run("test close db", func(t *testing.T) { - err := db.Close() - assert.Nil(t, err) + err = db.Close() + assert.NoError(t, err) }) } func TestDBSync(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-sync") - assert.Nil(t, err) + require.NoError(t, err) 
options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) t.Run("test sync db", func(t *testing.T) { - err := db.Sync() - assert.Nil(t, err) + err = db.Sync() + assert.NoError(t, err) }) err = db.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestDBPut(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-put") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -136,17 +138,19 @@ func TestDBPut(t *testing.T) { } err = db.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestDBGet(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-get") - assert.Nil(t, err) + if assert.NoError(t, err) { + assert.NotEqual(t, "", path) + } options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -186,16 +190,16 @@ func TestDBGet(t *testing.T) { } for _, log := range logs { - err := db.PutWithOptions(log.key, log.value, WriteOptions{ + errPutWithOptions := db.PutWithOptions(log.key, log.value, WriteOptions{ Sync: true, DisableWal: false, }) - assert.Equal(t, err != nil, log.wantErr) + assert.Equal(t, log.wantErr, errPutWithOptions != nil) } - + var value []byte for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - value, err := db.Get(tt.args.log.key) + value, err = db.Get(tt.args.log.key) if (err != nil) != tt.wantErr { t.Errorf("Get(key) error = %v, wantErr = %v", err, tt.wantErr) } @@ -204,17 +208,17 @@ func TestDBGet(t *testing.T) { } err = db.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestDBDelete(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-delete") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + 
require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -248,40 +252,41 @@ func TestDBDelete(t *testing.T) { } for _, log := range logs { - err := db.PutWithOptions(log.key, log.value, WriteOptions{ + errPutWithOptions := db.PutWithOptions(log.key, log.value, WriteOptions{ Sync: true, DisableWal: false, }) - assert.Nil(t, err) + require.NoError(t, errPutWithOptions) } + var value []byte for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := db.DeleteWithOptions(tt.args.log.key, WriteOptions{ + errDeleteWithOptions := db.DeleteWithOptions(tt.args.log.key, WriteOptions{ Sync: true, DisableWal: false, }) - if (err != nil) != tt.wantErr { + if (errDeleteWithOptions != nil) != tt.wantErr { t.Errorf("Get(key) error = %v, wantErr = %v", err, tt.wantErr) } - value, err := db.Get(tt.args.log.key) - assert.NotNil(t, err) + value, err = db.Get(tt.args.log.key) + require.Error(t, err) assert.Equal(t, []byte(nil), value) }) } err = db.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestDBExist(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-exist") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -321,16 +326,17 @@ func TestDBExist(t *testing.T) { } for _, log := range logs { - err := db.PutWithOptions(log.key, log.value, WriteOptions{ + errPutWithOptions := db.PutWithOptions(log.key, log.value, WriteOptions{ Sync: true, DisableWal: false, }) - assert.Equal(t, err != nil, log.wantErr) + assert.Equal(t, log.wantErr, errPutWithOptions != nil) } + var isExist bool for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - isExist, err := db.Exist(tt.args.log.key) + isExist, err = db.Exist(tt.args.log.key) if (err != nil) != tt.wantErr { t.Errorf("Get(key) error = %v, wantErr = %v", err, tt.wantErr) } @@ -339,17 +345,17 @@ func TestDBExist(t *testing.T) { } err = 
db.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestDBFlushMemTables(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-flush") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -380,24 +386,24 @@ func TestDBFlushMemTables(t *testing.T) { t.Run("test flushMemtables", func(t *testing.T) { time.Sleep(time.Second * 1) + var value []byte for _, log := range logs { - value, err := getValueFromVlog(db, log.key) - assert.Nil(t, err) + value, err = getValueFromVlog(db, log.key) + require.NoError(t, err) assert.Equal(t, log.value, value) } }) - } func TestDBCompact(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-compact") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path options.CompactBatchCount = 2 << 5 db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) type testLog struct { @@ -437,24 +443,24 @@ func TestDBCompact(t *testing.T) { }) } t.Run("test compaction", func(t *testing.T) { + var size, sizeCompact int64 time.Sleep(time.Millisecond * 500) - size, err := util.DirSize(db.options.DirPath) - assert.Nil(t, err) + size, err = util.DirSize(db.options.DirPath) + require.NoError(t, err) err = db.Compact() - assert.Nil(t, err) - - sizeCompact, err := util.DirSize(db.options.DirPath) - assert.Nil(t, err) - assert.Greater(t, size, sizeCompact) + require.NoError(t, err) + sizeCompact, err = util.DirSize(db.options.DirPath) + require.NoError(t, err) + require.Greater(t, size, sizeCompact) + var value []byte for _, log := range testlogs { - value, err := getValueFromVlog(db, log.key) - assert.Nil(t, err) + value, err = getValueFromVlog(db, log.key) + require.NoError(t, err) assert.Equal(t, log.value, value) } }) - } func getValueFromVlog(db *DB, key []byte) ([]byte, error) { @@ -502,13 +508,13 @@ func 
TestDBMultiClients(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-multi-client") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) - assert.Nil(t, err) + require.NoError(t, err) defer destroyDB(db) - t.Run("multi client running", func(t *testing.T) { + t.Run("multi client running", func(_ *testing.T) { var wg sync.WaitGroup // 2 clients to put @@ -569,27 +575,28 @@ func TestDBMultiClients(t *testing.T) { }) } +//nolint:gocognit func TestDBIterator(t *testing.T) { options := DefaultOptions path, err := os.MkdirTemp("", "db-test-iter") - assert.Nil(t, err) + require.NoError(t, err) options.DirPath = path db, err := Open(options) defer destroyDB(db) - assert.Nil(t, err) + require.NoError(t, err) db.immuMems = make([]*memtable, 3) opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } for i := 0; i < 3; i++ { - opts.tableId = uint32(i) + opts.tableID = uint32(i) db.immuMems[i], err = openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) } logRecord0 := []*LogRecord{ // 0 @@ -620,21 +627,21 @@ func TestDBIterator(t *testing.T) { {[]byte("abc3"), []byte("v3_1"), LogRecordNormal, 0}, } - list2Map := func(in []*LogRecord) (out map[string]*LogRecord) { - out = make(map[string]*LogRecord) + list2Map := func(in []*LogRecord) map[string]*LogRecord { + out := make(map[string]*LogRecord) for _, v := range in { out[string(v.Key)] = v } - return + return out } err = db.immuMems[0].putBatch(list2Map(logRecord0), 0, DefaultWriteOptions) - assert.Nil(t, err) + require.NoError(t, err) err = db.immuMems[1].putBatch(list2Map(logRecord1), 1, DefaultWriteOptions) - assert.Nil(t, err) + require.NoError(t, err) err = db.immuMems[2].putBatch(list2Map(logRecord2), 2, DefaultWriteOptions) - assert.Nil(t, err) + require.NoError(t, 
err) err = db.activeMem.putBatch(list2Map(logRecord3), 3, DefaultWriteOptions) - assert.Nil(t, err) + require.NoError(t, err) expectedKey := [][]byte{ []byte("k1"), @@ -648,7 +655,7 @@ func TestDBIterator(t *testing.T) { Reverse: false, Prefix: []byte("k"), }) - assert.Nil(t, err) + require.NoError(t, err) var i int iter.Rewind() i = 0 @@ -656,7 +663,6 @@ func TestDBIterator(t *testing.T) { if !iter.itrs[0].options.Reverse { assert.Equal(t, expectedKey[i], iter.Key()) assert.Equal(t, expectedVal[i], iter.Value()) - } else { assert.Equal(t, expectedKey[2-i], iter.Key()) assert.Equal(t, expectedVal[2-i], iter.Value()) @@ -671,23 +677,21 @@ func TestDBIterator(t *testing.T) { if !iter.itrs[0].options.Reverse { assert.Equal(t, expectedKey[i], iter.Key()) assert.Equal(t, expectedVal[i], iter.Value()) - } else { assert.Equal(t, expectedKey[2-i], iter.Key()) assert.Equal(t, expectedVal[2-i], iter.Value()) - } i++ iter.Next() } err = iter.Close() - assert.Nil(t, err) + require.NoError(t, err) iter, err = db.NewIterator(IteratorOptions{ Reverse: true, Prefix: []byte("k"), }) - assert.Nil(t, err) + require.NoError(t, err) iter.Rewind() i = 0 @@ -695,11 +699,9 @@ func TestDBIterator(t *testing.T) { if !iter.itrs[0].options.Reverse { assert.Equal(t, expectedKey[i], iter.Key()) assert.Equal(t, expectedVal[i], iter.Value()) - } else { assert.Equal(t, expectedKey[1-i], iter.Key()) assert.Equal(t, expectedVal[1-i], iter.Value()) - } i++ iter.Next() @@ -711,17 +713,15 @@ func TestDBIterator(t *testing.T) { if !iter.itrs[0].options.Reverse { assert.Equal(t, expectedKey[i], iter.Key()) assert.Equal(t, expectedVal[i], iter.Value()) - } else { assert.Equal(t, expectedKey[1-i], iter.Key()) assert.Equal(t, expectedVal[1-i], iter.Value()) - } i++ iter.Next() } err = iter.Close() - assert.Nil(t, err) + require.NoError(t, err) for j := 0; j < 3; j++ { db.flushMemtable(db.immuMems[0]) @@ -729,7 +729,7 @@ func TestDBIterator(t *testing.T) { Reverse: false, Prefix: []byte("k"), }) - 
assert.Nil(t, err) + require.NoError(t, err) iter.Rewind() i = 0 @@ -745,13 +745,13 @@ func TestDBIterator(t *testing.T) { i++ } err = iter.Close() - assert.Nil(t, err) + require.NoError(t, err) iter, err = db.NewIterator(IteratorOptions{ Reverse: true, Prefix: []byte("k"), }) - assert.Nil(t, err) + require.NoError(t, err) iter.Rewind() i = 0 @@ -767,14 +767,14 @@ func TestDBIterator(t *testing.T) { i++ } err = iter.Close() - assert.Nil(t, err) + require.NoError(t, err) } iter, err = db.NewIterator(IteratorOptions{ Reverse: false, Prefix: []byte("k"), }) - assert.Nil(t, err) + require.NoError(t, err) iter.Seek([]byte("k3")) var prev []byte @@ -785,13 +785,13 @@ func TestDBIterator(t *testing.T) { iter.Next() } err = iter.Close() - assert.Nil(t, err) + require.NoError(t, err) // unsupported type options = DefaultOptions options.IndexType = Hash db, err = Open(options) - assert.Nil(t, err) + require.NoError(t, err) itr, err := db.NewIterator(IteratorOptions{Reverse: false}) assert.Equal(t, ErrDBIteratorUnsupportedTypeHASH, err) assert.Nil(t, itr) diff --git a/hashtable.go b/hashtable.go index ab60c74e..2f7387d3 100644 --- a/hashtable.go +++ b/hashtable.go @@ -12,7 +12,8 @@ import ( "golang.org/x/sync/errgroup" ) -// diskhash requires fixed-size value, so we set the slotValueLength to `binary.MaxVarintLen32*3 + binary.MaxVarintLen64`. +// diskhash requires fixed-size value +// so we set the slotValueLength to `binary.MaxVarintLen32*3 + binary.MaxVarintLen64`. // This is the maximum length after wal.chunkPosition encoding. const slotValueLength = binary.MaxVarintLen32*3 + binary.MaxVarintLen64 @@ -45,7 +46,7 @@ func openHashIndex(options indexOptions) (*HashTable, error) { }, nil } -// PutBatch put batch records to index +// PutBatch put batch records to index. 
func (ht *HashTable) PutBatch(positions []*KeyPosition, matchKeyFunc ...diskhash.MatchKeyFunc) error { if len(positions) == 0 { return nil @@ -88,7 +89,7 @@ func (ht *HashTable) PutBatch(positions []*KeyPosition, matchKeyFunc ...diskhash return g.Wait() } -// Get chunk position by key +// Get chunk position by key. func (ht *HashTable) Get(key []byte, matchKeyFunc ...diskhash.MatchKeyFunc) (*KeyPosition, error) { if len(key) == 0 { return nil, ErrKeyIsEmpty @@ -99,11 +100,11 @@ func (ht *HashTable) Get(key []byte, matchKeyFunc ...diskhash.MatchKeyFunc) (*Ke if err != nil { return nil, err } - // hashTable will not use keyPosition, so return nil, nil + //nolint:nilnil // hashTable will not use keyPosition, so return nil, nil return nil, nil } -// DeleteBatch delete batch records from index +// DeleteBatch delete batch records from index. func (ht *HashTable) DeleteBatch(keys [][]byte, matchKeyFunc ...diskhash.MatchKeyFunc) error { if len(keys) == 0 { return nil @@ -143,7 +144,7 @@ func (ht *HashTable) DeleteBatch(keys [][]byte, matchKeyFunc ...diskhash.MatchKe return g.Wait() } -// Sync sync index data to disk +// Sync sync index data to disk. func (ht *HashTable) Sync() error { for _, table := range ht.tables { err := table.Sync() @@ -154,7 +155,7 @@ func (ht *HashTable) Sync() error { return nil } -// Close index +// Close close index. func (ht *HashTable) Close() error { for _, table := range ht.tables { err := table.Close() @@ -165,7 +166,7 @@ func (ht *HashTable) Close() error { return nil } -// MatchKeyFunc Set nil if do not need keyPos or value +// MatchKeyFunc Set nil if do not need keyPos or value. 
func MatchKeyFunc(db *DB, key []byte, keyPos **KeyPosition, value *[]byte) func(slot diskhash.Slot) (bool, error) { return func(slot diskhash.Slot) (bool, error) { chunkPosition := wal.DecodeChunkPosition(slot.Value) diff --git a/hashtable_test.go b/hashtable_test.go index 3aaff845..0ce015af 100644 --- a/hashtable_test.go +++ b/hashtable_test.go @@ -10,10 +10,11 @@ import ( "github.com/rosedblabs/diskhash" "github.com/rosedblabs/wal" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func testMatchFunc(exist bool) diskhash.MatchKeyFunc { - return func(slot diskhash.Slot) (bool, error) { + return func(_ diskhash.Slot) (bool, error) { return exist, nil } } @@ -68,8 +69,11 @@ func TestOpenHashTable(t *testing.T) { _ = os.RemoveAll(tt.options.DirPath) }() db, err := Open(tt.options) - assert.Nil(t, err) - got := db.index.(*HashTable) + require.NoError(t, err) + got, ok := db.index.(*HashTable) + if !ok { + t.Errorf("indexType wrong") + } if (err != nil) != tt.wantErr { t.Errorf("openHashTable() error = %v, wantErr %v", err, tt.wantErr) return @@ -79,7 +83,6 @@ func TestOpenHashTable(t *testing.T) { assert.Equal(t, tt.want.options.partitionNum, got.options.partitionNum) assert.Equal(t, len(tt.want.tables), len(got.tables)) }) - } } @@ -96,13 +99,13 @@ func testHashTablePutBatch(t *testing.T, partitionNum int) { keyHashFunction: xxhash.Sum64, } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() ht, err := openHashIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keyPositions []*KeyPosition keyPositions = append(keyPositions, &KeyPosition{ @@ -136,7 +139,7 @@ func testHashTablePutBatch(t *testing.T, partitionNum int) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := ht.PutBatch(tt.positions, tt.matchKeyFunc...); (err != nil) != tt.wantErr { + if err = ht.PutBatch(tt.positions, tt.matchKeyFunc...); (err 
!= nil) != tt.wantErr { t.Errorf("HashTable.PutBatch() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -156,12 +159,12 @@ func testHashTableGet(t *testing.T, partitionNum int) { keyHashFunction: xxhash.Sum64, } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() ht, err := openHashIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keyPositions []*KeyPosition keyPositions = append(keyPositions, &KeyPosition{ key: []byte("exist"), @@ -172,7 +175,7 @@ func testHashTableGet(t *testing.T, partitionNum int) { testMatchFunc(true), testMatchFunc(false), } err = ht.PutBatch(keyPositions, matchKeyFuncs[:1]...) - assert.Nil(t, err) + require.NoError(t, err) tests := []struct { name string @@ -188,7 +191,7 @@ func testHashTableGet(t *testing.T, partitionNum int) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := ht.Get(tt.key, tt.matchKeyFunc...) + _, err = ht.Get(tt.key, tt.matchKeyFunc...) if (err != nil) != tt.wantErr { t.Errorf("HashTable.Get() error = %v, wantErr %v", err, tt.wantErr) return @@ -210,12 +213,12 @@ func testHashTableDeleteBatch(t *testing.T, partitionNum int) { keyHashFunction: xxhash.Sum64, } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() ht, err := openHashIndex(options) - assert.Nil(t, err) + require.NoError(t, err) var keys [][]byte keys = append(keys, nil, []byte("not-exist"), []byte("exist"), []byte("")) @@ -227,7 +230,7 @@ func testHashTableDeleteBatch(t *testing.T, partitionNum int) { position: &wal.ChunkPosition{}, }) err = ht.PutBatch(keyPositions, []diskhash.MatchKeyFunc{testMatchFunc(true)}...) 
- assert.Nil(t, err) + require.NoError(t, err) matchKeyFuncs := []diskhash.MatchKeyFunc{ testMatchFunc(false), testMatchFunc(true), @@ -245,7 +248,7 @@ func testHashTableDeleteBatch(t *testing.T, partitionNum int) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := ht.DeleteBatch(tt.keys, tt.matchKeyFunc...); (err != nil) != tt.wantErr { + if err = ht.DeleteBatch(tt.keys, tt.matchKeyFunc...); (err != nil) != tt.wantErr { t.Errorf("HashTable.DeleteBatch() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -265,16 +268,16 @@ func testHashTableClose(t *testing.T, partitionNum int) { keyHashFunction: xxhash.Sum64, } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() ht, err := openHashIndex(options) - assert.Nil(t, err) + require.NoError(t, err) err = ht.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestHashTable_Sync(t *testing.T) { @@ -290,14 +293,14 @@ func testHashTableSync(t *testing.T, partitionNum int) { keyHashFunction: xxhash.Sum64, } err := os.MkdirAll(options.dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(options.dirPath) }() ht, err := openHashIndex(options) - assert.Nil(t, err) + require.NoError(t, err) err = ht.Sync() - assert.Nil(t, err) + assert.NoError(t, err) } diff --git a/heap.go b/heap.go index fedafed3..b7aeef15 100644 --- a/heap.go +++ b/heap.go @@ -49,9 +49,8 @@ func (ih *iterHeap) Less(i int, j int) bool { } if (*ih)[i].options.Reverse { return bytes.Compare(ki, kj) == 1 - } else { - return bytes.Compare(ki, kj) == -1 } + return bytes.Compare(ki, kj) == -1 } // Swap swaps the elements with indexes i and j. diff --git a/iterator.go b/iterator.go index f378ff7c..35edc322 100644 --- a/iterator.go +++ b/iterator.go @@ -8,7 +8,7 @@ import ( "github.com/rosedblabs/wal" ) -// baseIterator +// baseIterator. 
type baseIterator interface { // Rewind seek the first key in the iterator. Rewind() @@ -27,7 +27,7 @@ type baseIterator interface { Close() error } -// Iterator holds a heap and a set of iterators that implement the baseIterator interface +// Iterator holds a heap and a set of iterators that implement the baseIterator interface. type Iterator struct { h iterHeap itrs []*singleIter // used for rebuilding heap @@ -125,7 +125,8 @@ func (mi *Iterator) Value() []byte { } }() topIter := mi.h[0] - if topIter.iType == BptreeItr { + switch topIter.iType { + case BptreeItr: keyPos := new(KeyPosition) keyPos.key = topIter.iter.Key() keyPos.partition = uint32(mi.db.vlog.getKeyPartition(topIter.iter.Key())) @@ -135,9 +136,9 @@ func (mi *Iterator) Value() []byte { panic(err) } return record.value - } else if topIter.iType == MemItr { + case MemItr: return topIter.iter.Value().(y.ValueStruct).Value - } else { + default: panic("iType not support") } } @@ -193,7 +194,10 @@ func (db *DB) NewIterator(options IteratorOptions) (*Iterator, error) { itrs := make([]*singleIter, 0, db.options.PartitionNum+len(db.immuMems)+1) itrsM := make(map[int]*singleIter) rank := 0 - index := db.index.(*BPTree) + index, ok := db.index.(*BPTree) + if !ok { + panic("index type not support") + } for i := 0; i < db.options.PartitionNum; i++ { tx, err := index.trees[i].Begin(false) @@ -220,8 +224,8 @@ func (db *DB) NewIterator(options IteratorOptions) (*Iterator, error) { itrsM[rank] = itrs[len(itrs)-1] rank++ } - - memtableList := append(db.immuMems, db.activeMem) + memtableList := make([]*memtable, len(db.immuMems)+1) + copy(memtableList, append(db.immuMems, db.activeMem)) for i := 0; i < len(memtableList); i++ { itr := newMemtableIterator(options, memtableList[i]) itr.Rewind() diff --git a/memtable.go b/memtable.go index 4d282fb5..123438ca 100644 --- a/memtable.go +++ b/memtable.go @@ -2,6 +2,7 @@ package lotusdb import ( "bytes" + "errors" "fmt" "io" "math" @@ -19,7 +20,7 @@ import ( const ( // the wal 
file name format is .SEG.%d // %d is the unique id of the memtable, used to generate wal file name - // for example, the wal file name of memtable with id 1 is .SEG.1 + // for example, the wal file name of memtable with id 1 is .SEG.1. walFileExt = ".SEG.%d" initialTableID = 1 ) @@ -46,7 +47,7 @@ type ( // memtableOptions represents the configuration options for a memtable. memtableOptions struct { dirPath string // where write ahead log wal file is stored - tableId uint32 // unique id of the memtable, used to generate wal file name + tableID uint32 // unique id of the memtable, used to generate wal file name memSize uint32 // max size of the memtable walBytesPerSync uint32 // flush wal file to disk throughput BytesPerSync parameter walSync bool // WAL flush immediately after each writing @@ -56,7 +57,7 @@ type ( // find the wal file of the memtable with the specified id // a wal is associated with a memtable, so the wal file name is generated by the memtable id -// for example, the wal file name of memtable with id 1 is .SEG.1 +// for example, the wal file name of memtable with id 1 is .SEG.1. 
func openAllMemtables(options Options) ([]*memtable, error) { entries, err := os.ReadDir(options.DirPath) if err != nil { @@ -71,7 +72,7 @@ func openAllMemtables(options Options) ([]*memtable, error) { } var id int var prefix int - _, err := fmt.Sscanf(entry.Name(), "%d"+walFileExt, &prefix, &id) + _, err = fmt.Sscanf(entry.Name(), "%d"+walFileExt, &prefix, &id) if err != nil { continue } @@ -81,20 +82,19 @@ func openAllMemtables(options Options) ([]*memtable, error) { if len(tableIDs) == 0 { tableIDs = append(tableIDs, initialTableID) } - sort.Ints(tableIDs) tables := make([]*memtable, len(tableIDs)) for i, table := range tableIDs { - table, err := openMemtable(memtableOptions{ + table, errOpenMemtable := openMemtable(memtableOptions{ dirPath: options.DirPath, - tableId: uint32(table), + tableID: uint32(table), memSize: options.MemtableSize, walSync: options.Sync, walBytesPerSync: options.BytesPerSync, walBlockCache: options.BlockCache, }) - if err != nil { - return nil, err + if errOpenMemtable != nil { + return nil, errOpenMemtable } tables[i] = table } @@ -107,6 +107,7 @@ func openAllMemtables(options Options) ([]*memtable, error) { // and load all entries from wal to rebuild the content of the skip list. 
func openMemtable(options memtableOptions) (*memtable, error) { // init skip list + //nolint:gomnd // default size skl := arenaskl.NewSkiplist(int64(float64(options.memSize) * 1.5)) table := &memtable{options: options, skl: skl} @@ -114,7 +115,7 @@ func openMemtable(options memtableOptions) (*memtable, error) { walFile, err := wal.Open(wal.Options{ DirPath: options.dirPath, SegmentSize: math.MaxInt, // no limit, guarantee that a wal file only contains one segment file - SegmentFileExt: fmt.Sprintf(walFileExt, options.tableId), + SegmentFileExt: fmt.Sprintf(walFileExt, options.tableID), BlockCache: options.walBlockCache, Sync: options.walSync, BytesPerSync: options.walBytesPerSync, @@ -129,26 +130,26 @@ func openMemtable(options memtableOptions) (*memtable, error) { // from wal to rebuild the content of the skip list reader := table.wal.NewReader() for { - chunk, _, err := reader.Next() - if err != nil { - if err == io.EOF { + chunk, _, errNext := reader.Next() + if errNext != nil { + if errors.Is(errNext, io.EOF) { break } - return nil, err + return nil, errNext } record := decodeLogRecord(chunk) if record.Type == LogRecordBatchFinished { - batchId, err := snowflake.ParseBytes(record.Key) - if err != nil { - return nil, err + batchID, errParseBytes := snowflake.ParseBytes(record.Key) + if errParseBytes != nil { + return nil, errParseBytes } - for _, idxRecord := range indexRecords[uint64(batchId)] { + for _, idxRecord := range indexRecords[uint64(batchID)] { table.skl.Put(y.KeyWithTs(idxRecord.Key, 0), y.ValueStruct{Value: idxRecord.Value, Meta: idxRecord.Type}) } - delete(indexRecords, uint64(batchId)) + delete(indexRecords, uint64(batchID)) } else { - indexRecords[record.BatchId] = append(indexRecords[record.BatchId], record) + indexRecords[record.BatchID] = append(indexRecords[record.BatchID], record) } } @@ -158,20 +159,19 @@ func openMemtable(options memtableOptions) (*memtable, error) { // putBatch writes a batch of entries to memtable. 
func (mt *memtable) putBatch(pendingWrites map[string]*LogRecord, - batchId snowflake.ID, options WriteOptions) error { - + batchID snowflake.ID, options WriteOptions) error { // if wal is not disabled, write to wal first to ensure durability and atomicity if !options.DisableWal { // add record to wal.pendingWrites for _, record := range pendingWrites { - record.BatchId = uint64(batchId) + record.BatchID = uint64(batchID) encRecord := encodeLogRecord(record) mt.wal.PendingWrites(encRecord) } // add a record to indicate the end of the batch endRecord := encodeLogRecord(&LogRecord{ - Key: batchId.Bytes(), + Key: batchID.Bytes(), Type: LogRecordBatchFinished, }) mt.wal.PendingWrites(endRecord) @@ -234,7 +234,7 @@ func (mt *memtable) sync() error { return nil } -// memtableIterator implement baseIterator +// memtableIterator implement baseIterator. type memtableIterator struct { options IteratorOptions iter *arenaskl.UniIterator diff --git a/memtable_test.go b/memtable_test.go index 12a64729..97f7a4e6 100644 --- a/memtable_test.go +++ b/memtable_test.go @@ -2,6 +2,7 @@ package lotusdb import ( "bytes" + "io/fs" "os" "testing" @@ -9,11 +10,12 @@ import ( "github.com/dgraph-io/badger/v4/y" "github.com/lotusdblabs/lotusdb/v2/util" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMemtableOpen(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-open") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -21,7 +23,7 @@ func TestMemtableOpen(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, @@ -29,74 +31,79 @@ func TestMemtableOpen(t *testing.T) { } t.Run("open memtable", func(t *testing.T) { - table, err := openMemtable(opts) - assert.Nil(t, err) + var table *memtable + table, err = openMemtable(opts) + assert.NotNil(t, table) + 
require.NoError(t, err) err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) }) } func TestMemtableOpenAll(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-open-all") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) }() + var table *memtable for i := 0; i < DefaultOptions.MemtableNums; i++ { opts := memtableOptions{ dirPath: path, - tableId: uint32(i), + tableID: uint32(i), memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } - table, err := openMemtable(opts) - assert.Nil(t, err) + table, err = openMemtable(opts) + require.NoError(t, err) + assert.NotNil(t, table) err = table.close() - assert.Nil(t, err) + require.NoError(t, err) } t.Run("test open all memtables", func(t *testing.T) { + var tables []*memtable var opts = DefaultOptions opts.DirPath = path - tables, err := openAllMemtables(opts) - assert.Nil(t, err) + tables, err = openAllMemtables(opts) + require.NoError(t, err) + assert.NotNil(t, tables) for _, table := range tables { err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } }) - } func TestMemTablePutAllKindsEntries(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-put-all-kinds-entries") - assert.Nil(t, err) - + require.NoError(t, err) + assert.NotEqual(t, "", path) defer func() { _ = os.RemoveAll(path) }() opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) logs := []*LogRecord{ {Key: []byte("key 0"), Value: []byte("value 0"), Type: LogRecordNormal}, @@ -121,18 
+128,18 @@ func TestMemTablePutAllKindsEntries(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := table.putBatch(tt.args.entry, node.Generate(), writeOpts) - assert.Nil(t, err) + err = table.putBatch(tt.args.entry, node.Generate(), writeOpts) + assert.NoError(t, err) }) } err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestMemTablePutBatch(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-put-batch") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -140,21 +147,21 @@ func TestMemTablePutBatch(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) pendingWrites := make(map[string]*LogRecord) val := util.RandomValue(512) @@ -164,17 +171,17 @@ func TestMemTablePutBatch(t *testing.T) { } t.Run("test memory table put batch", func(t *testing.T) { - err := table.putBatch(pendingWrites, node.Generate(), writeOpts) - assert.Nil(t, err) + err = table.putBatch(pendingWrites, node.Generate(), writeOpts) + assert.NoError(t, err) }) err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestMemTablePutBatchReopen(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-put-batch-reopen") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -182,21 +189,21 @@ func TestMemTablePutBatchReopen(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: 
DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) pendingWrites := make(map[string]*LogRecord) val := util.RandomValue(512) @@ -206,25 +213,24 @@ func TestMemTablePutBatchReopen(t *testing.T) { } err = table.putBatch(pendingWrites, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("test memory table put batch after reopening", func(t *testing.T) { err = table.close() - assert.Nil(t, err) - table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) + table, err = openMemtable(opts) + require.NoError(t, err) err = table.putBatch(pendingWrites, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) }) err = table.close() - assert.Nil(t, err) - + assert.NoError(t, err) } func TestMemTableGet(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-get") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -232,21 +238,21 @@ func TestMemTableGet(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) writeLogs := map[string]*LogRecord{ "key 0": {Key: []byte("key 0"), Value: []byte("value 0"), Type: LogRecordNormal}, @@ -260,32 +266,32 @@ func TestMemTableGet(t *testing.T) { } err = table.putBatch(writeLogs, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("get existing log", func(t *testing.T) { for keyStr, log := range 
writeLogs { del, value := table.get([]byte(keyStr)) - assert.Equal(t, false, del) - assert.Equal(t, log.Value, value) + assert.False(t, del) + assert.Equal(t, value, log.Value) } }) err = table.putBatch(deleteLogs, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("get deleted log", func(t *testing.T) { for keyStr, log := range deleteLogs { del, value := table.get([]byte(keyStr)) - assert.Equal(t, true, del) - assert.Equal(t, log.Value, value) + assert.True(t, del) + assert.Equal(t, value, log.Value) } }) err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestMemTableGetReopen(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-get-reopen") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -293,7 +299,7 @@ func TestMemTableGetReopen(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, @@ -305,70 +311,69 @@ func TestMemTableGetReopen(t *testing.T) { DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) - + require.NoError(t, err) t.Run("get existing log reopen", func(t *testing.T) { - table, err := openMemtable(opts) - assert.Nil(t, err) + var table *memtable + table, err = openMemtable(opts) + require.NoError(t, err) writeLogs := map[string]*LogRecord{ "key 0": {Key: []byte("key 0"), Value: []byte("value 0"), Type: LogRecordNormal}, "": {Key: nil, Value: []byte("value 1"), Type: LogRecordNormal}, "key 2": {Key: []byte("key 2"), Value: []byte(""), Type: LogRecordNormal}, } err = table.putBatch(writeLogs, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) err = table.close() - assert.Nil(t, err) + require.NoError(t, err) table, err = openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) for keyStr, log := range writeLogs { del, value := table.get([]byte(keyStr)) - 
assert.Equal(t, false, del) + assert.False(t, del) assert.Equal(t, log.Value, value) } err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) }) - t.Run("get deleted log reopen", func(t *testing.T) { - table, err := openMemtable(opts) - assert.Nil(t, err) + var table *memtable + table, err = openMemtable(opts) + require.NoError(t, err) deleteLogs := map[string]*LogRecord{ "key 0": {Key: []byte("key 0"), Value: []byte(""), Type: LogRecordDeleted}, "": {Key: nil, Value: []byte(""), Type: LogRecordDeleted}, "key 2": {Key: []byte("key 2"), Value: []byte(""), Type: LogRecordDeleted}, } err = table.putBatch(deleteLogs, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) for keyStr, log := range deleteLogs { del, value := table.get([]byte(keyStr)) - assert.Equal(t, true, del) + assert.True(t, del) assert.Equal(t, log.Value, value) } err = table.close() - assert.Nil(t, err) + require.NoError(t, err) table, err = openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) for keyStr, log := range deleteLogs { del, value := table.get([]byte(keyStr)) - assert.Equal(t, true, del) + assert.True(t, del) assert.Equal(t, log.Value, value) } err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) }) - } func TestMemTableDelWal(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-delete-wal") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -376,30 +381,32 @@ func TestMemTableDelWal(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("test memtable delete wal", func(t *testing.T) { - err := table.deleteWAl() - assert.Nil(t, err) - entries, err := os.ReadDir(path) - assert.Nil(t, err) - assert.Equal(t, 
len(entries), 0) + var entries []fs.DirEntry + err = table.deleteWAl() + require.NoError(t, err) + entries, err = os.ReadDir(path) + require.NoError(t, err) + assert.Empty(t, entries) }) err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestMemTableSync(t *testing.T) { + var err error path, err := os.MkdirTemp("", "memtable-test-sync") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -407,21 +414,21 @@ func TestMemTableSync(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) pendingWrites := make(map[string]*LogRecord) val := util.RandomValue(512) @@ -429,22 +436,22 @@ func TestMemTableSync(t *testing.T) { log := &LogRecord{Key: util.GetTestKey(i), Value: val} pendingWrites[string(log.Key)] = log } - err = table.putBatch(pendingWrites, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("test memtable delete wal", func(t *testing.T) { - err := table.sync() - assert.Nil(t, err) + err = table.sync() + assert.NoError(t, err) }) err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestMemtableClose(t *testing.T) { + var err error path, err := os.MkdirTemp("", "memtable-test-close") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -452,7 +459,7 @@ func TestMemtableClose(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, @@ -460,17 +467,17 @@ func 
TestMemtableClose(t *testing.T) { } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) t.Run("open memtable", func(t *testing.T) { err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) }) } func TestNewMemtableIterator(t *testing.T) { path, err := os.MkdirTemp("", "memtable-test-iterator-new") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -478,7 +485,7 @@ func TestNewMemtableIterator(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, @@ -488,23 +495,24 @@ func TestNewMemtableIterator(t *testing.T) { table, err := openMemtable(opts) defer func() { err = table.close() - assert.Nil(t, err) + assert.NoError(t, err) }() - assert.Nil(t, err) + require.NoError(t, err) options := IteratorOptions{ Reverse: false, } iter := newMemtableIterator(options, table) - assert.Nil(t, err) + require.NoError(t, err) err = iter.Close() - assert.Nil(t, err) + assert.NoError(t, err) } func Test_memtableIterator(t *testing.T) { + var err error path, err := os.MkdirTemp("", "memtable-test-iterator-rewind") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -512,21 +520,21 @@ func Test_memtableIterator(t *testing.T) { opts := memtableOptions{ dirPath: path, - tableId: 0, + tableID: 0, memSize: DefaultOptions.MemtableSize, walBytesPerSync: DefaultOptions.BytesPerSync, walSync: DefaultBatchOptions.Sync, walBlockCache: DefaultOptions.BlockCache, } table, err := openMemtable(opts) - assert.Nil(t, err) + require.NoError(t, err) writeOpts := WriteOptions{ Sync: false, DisableWal: false, } node, err := snowflake.NewNode(1) - assert.Nil(t, err) + require.NoError(t, err) writeLogs := map[string]*LogRecord{ "key 0": {Key: []byte("key 0"), Value: []byte("value 0"), Type: LogRecordNormal}, "key 1": {Key: nil, Value: []byte("value 1"), 
Type: LogRecordNormal}, @@ -538,13 +546,13 @@ func Test_memtableIterator(t *testing.T) { "abc 1": {Key: []byte("abc 1"), Value: []byte(""), Type: LogRecordNormal}, } err = table.putBatch(writeLogs, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions := IteratorOptions{ Reverse: false, } itr := newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + require.NoError(t, err) var prev []byte itr.Rewind() for itr.Valid() { @@ -556,12 +564,12 @@ func Test_memtableIterator(t *testing.T) { itr.Next() } err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions.Reverse = true prev = nil itr = newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + require.NoError(t, err) itr.Rewind() for itr.Valid() { currKey := itr.Key() @@ -572,11 +580,11 @@ func Test_memtableIterator(t *testing.T) { itr.Next() } err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions.Reverse = false itr = newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + require.NoError(t, err) itr.Seek([]byte("key 0")) assert.Equal(t, []byte("key 0"), itr.Key()) itr.Seek([]byte("key 4")) @@ -585,11 +593,11 @@ func Test_memtableIterator(t *testing.T) { itr.Seek([]byte("aye 2")) assert.Equal(t, []byte("key 0"), itr.Key()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions.Reverse = true itr = newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + require.NoError(t, err) itr.Seek([]byte("key 4")) assert.Equal(t, []byte("key 2"), itr.Key()) @@ -600,25 +608,25 @@ func Test_memtableIterator(t *testing.T) { assert.False(t, itr.Valid()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) // prefix err = table.putBatch(writeLogs2, node.Generate(), writeOpts) - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions.Reverse = false iteratorOptions.Prefix = []byte("not valid") itr = newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + 
require.NoError(t, err) itr.Rewind() assert.False(t, itr.Valid()) err = itr.Close() - assert.Nil(t, err) + require.NoError(t, err) iteratorOptions.Reverse = false iteratorOptions.Prefix = []byte("abc") itr = newMemtableIterator(iteratorOptions, table) - assert.Nil(t, err) + require.NoError(t, err) itr.Rewind() for itr.Valid() { assert.True(t, bytes.HasPrefix(itr.Key(), iteratorOptions.Prefix)) @@ -626,6 +634,5 @@ func Test_memtableIterator(t *testing.T) { itr.Next() } err = itr.Close() - assert.Nil(t, err) - + assert.NoError(t, err) } diff --git a/options.go b/options.go index ac7e3e87..64b66e3f 100644 --- a/options.go +++ b/options.go @@ -114,17 +114,22 @@ const ( ) var DefaultOptions = Options{ - DirPath: tempDBDir(), - MemtableSize: 64 * MB, - MemtableNums: 15, - BlockCache: 0, - Sync: false, - BytesPerSync: 0, - PartitionNum: 3, - KeyHashFunction: xxhash.Sum64, - ValueLogFileSize: 1 * GB, - IndexType: BTree, - CompactBatchCount: 10000, + DirPath: tempDBDir(), + //nolint:gomnd // default + MemtableSize: 64 * MB, + //nolint:gomnd // default + MemtableNums: 15, + BlockCache: 0, + Sync: false, + BytesPerSync: 0, + //nolint:gomnd // default + PartitionNum: 3, + KeyHashFunction: xxhash.Sum64, + ValueLogFileSize: 1 * GB, + IndexType: BTree, + //nolint:gomnd // default + CompactBatchCount: 10000, + //nolint:gomnd // default WaitMemSpaceTimeout: 100 * time.Millisecond, } diff --git a/structs.go b/structs.go index 51bb4cb2..16068a20 100644 --- a/structs.go +++ b/structs.go @@ -30,7 +30,7 @@ type LogRecord struct { Key []byte Value []byte Type LogRecordType - BatchId uint64 + BatchID uint64 } // +-------------+-------------+-------------+--------------+-------------+--------------+ @@ -45,7 +45,7 @@ func encodeLogRecord(logRecord *LogRecord) []byte { var index = 1 // batch id - index += binary.PutUvarint(header[index:], logRecord.BatchId) + index += binary.PutUvarint(header[index:], logRecord.BatchID) // key size index += binary.PutVarint(header[index:], 
int64(len(logRecord.Key))) // value size @@ -70,7 +70,7 @@ func decodeLogRecord(buf []byte) *LogRecord { var index uint32 = 1 // batch id - batchId, n := binary.Uvarint(buf[index:]) + batchID, n := binary.Uvarint(buf[index:]) index += uint32(n) // key size @@ -83,15 +83,15 @@ func decodeLogRecord(buf []byte) *LogRecord { // copy key key := make([]byte, keySize) - copy(key[:], buf[index:index+uint32(keySize)]) + copy(key, buf[index:index+uint32(keySize)]) index += uint32(keySize) // copy value value := make([]byte, valueSize) - copy(value[:], buf[index:index+uint32(valueSize)]) + copy(value, buf[index:index+uint32(valueSize)]) return &LogRecord{Key: key, Value: value, - BatchId: batchId, Type: recordType} + BatchID: batchID, Type: recordType} } // KeyPosition is the position of the key in the value log. @@ -109,9 +109,10 @@ type ValueLogRecord struct { func encodeValueLogRecord(record *ValueLogRecord) []byte { buf := make([]byte, 4+len(record.key)+len(record.value)) + keySize := 4 index := 0 - binary.LittleEndian.PutUint32(buf[index:index+4], uint32(len(record.key))) - index += 4 + binary.LittleEndian.PutUint32(buf[index:keySize], uint32(len(record.key))) + index += keySize copy(buf[index:index+len(record.key)], record.key) index += len(record.key) @@ -120,10 +121,11 @@ func encodeValueLogRecord(record *ValueLogRecord) []byte { } func decodeValueLogRecord(buf []byte) *ValueLogRecord { - keyLen := binary.LittleEndian.Uint32(buf[:4]) + var keySize uint32 = 4 + keyLen := binary.LittleEndian.Uint32(buf[:keySize]) key := make([]byte, keyLen) - copy(key, buf[4:4+keyLen]) - value := make([]byte, uint32(len(buf))-keyLen-4) - copy(value, buf[4+keyLen:]) + copy(key, buf[keySize:keySize+keyLen]) + value := make([]byte, uint32(len(buf))-keyLen-keySize) + copy(value, buf[keySize+keyLen:]) return &ValueLogRecord{key: key, value: value} } diff --git a/util/file_test.go b/util/file_test.go index b025d4ab..3b1aba02 100644 --- a/util/file_test.go +++ b/util/file_test.go @@ -6,29 
+6,30 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDirSize(t *testing.T) { dirPath, err := os.MkdirTemp("", "db-test-DirSize") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(dirPath) }() err = os.MkdirAll(dirPath, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) file, err := os.Create(fmt.Sprintf("%s/test.txt", dirPath)) - assert.Nil(t, err) + require.NoError(t, err) byteSilces := []byte("test") _, err = file.Write(byteSilces) - assert.Nil(t, err) + require.NoError(t, err) err = file.Close() - assert.Nil(t, err) + require.NoError(t, err) t.Run("test DirSize", func(t *testing.T) { - size, err := DirSize(dirPath) - assert.Nil(t, err) + size, errDirSize := DirSize(dirPath) + require.NoError(t, errDirSize) assert.Greater(t, size, int64(0)) }) } diff --git a/util/rand_kv.go b/util/rand_kv.go index 709fbf1a..ea1d97d9 100644 --- a/util/rand_kv.go +++ b/util/rand_kv.go @@ -8,7 +8,8 @@ import ( ) var ( - lock = sync.Mutex{} + lock = sync.Mutex{} + //nolint:gosec //used in test randStr = rand.New(rand.NewSource(time.Now().Unix())) letters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") ) diff --git a/util/rand_kv_test.go b/util/rand_kv_test.go index 38a76a20..1a69ced9 100644 --- a/util/rand_kv_test.go +++ b/util/rand_kv_test.go @@ -1,8 +1,9 @@ package util import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestGetTestKey(t *testing.T) { diff --git a/vlog.go b/vlog.go index ed7d24f7..b47dc923 100644 --- a/vlog.go +++ b/vlog.go @@ -43,7 +43,7 @@ type valueLogOptions struct { } // open wal files for value log, it will open several wal files for concurrent writing and reading -// the number of wal files is specified by the partitionNum +// the number of wal files is specified by the partitionNum. 
func openValueLog(options valueLogOptions) (*valueLog, error) { var walFiles []*wal.WAL @@ -65,7 +65,7 @@ func openValueLog(options valueLogOptions) (*valueLog, error) { return &valueLog{walFiles: walFiles, options: options}, nil } -// read the value log record from the specified position +// read the value log record from the specified position. func (vlog *valueLog) read(pos *KeyPosition) (*ValueLogRecord, error) { buf, err := vlog.walFiles[pos.partition].Read(pos.position) if err != nil { @@ -94,7 +94,8 @@ func (vlog *valueLog) writeBatch(records []*ValueLogRecord) ([]*KeyPosition, err } part := i - g.Go(func() (err error) { + g.Go(func() error { + var err error defer func() { if err != nil { vlog.walFiles[part].ClearPendingWrites() @@ -107,7 +108,7 @@ func (vlog *valueLog) writeBatch(records []*ValueLogRecord) ([]*KeyPosition, err select { case <-ctx.Done(): err = ctx.Err() - return + return err default: vlog.walFiles[part].PendingWrites(encodeValueLogRecord(record)) } @@ -143,7 +144,7 @@ func (vlog *valueLog) writeBatch(records []*ValueLogRecord) ([]*KeyPosition, err return keyPositions, nil } -// sync the value log to disk +// sync the value log to disk. func (vlog *valueLog) sync() error { for _, walFile := range vlog.walFiles { if err := walFile.Sync(); err != nil { @@ -153,7 +154,7 @@ func (vlog *valueLog) sync() error { return nil } -// close the value log +// close the value log. 
func (vlog *valueLog) close() error { for _, walFile := range vlog.walFiles { if err := walFile.Close(); err != nil { diff --git a/vlog_test.go b/vlog_test.go index e7bb1def..4e0ee1aa 100644 --- a/vlog_test.go +++ b/vlog_test.go @@ -7,13 +7,14 @@ import ( "github.com/lotusdblabs/lotusdb/v2/util" "github.com/rosedblabs/wal" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestOpenValueLog(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-open") - assert.Nil(t, err) + require.NoError(t, err) err = os.MkdirAll(path, os.ModePerm) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -27,16 +28,16 @@ func TestOpenValueLog(t *testing.T) { hashKeyFunction: DefaultOptions.KeyHashFunction, } t.Run("open vlog files", func(t *testing.T) { - vlog, err := openValueLog(opts) - assert.Nil(t, err) + vlog, errOpen := openValueLog(opts) + require.NoError(t, errOpen) err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) }) } func TestValueLogWriteAllKindsEntries(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-write-entries") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: GB, @@ -46,7 +47,7 @@ func TestValueLogWriteAllKindsEntries(t *testing.T) { } vlog, err := openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -79,8 +80,8 @@ func TestValueLogWriteAllKindsEntries(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logs := []*ValueLogRecord{tt.args.log} - _, err := vlog.writeBatch(logs) + logsT := []*ValueLogRecord{tt.args.log} + _, err = vlog.writeBatch(logsT) if (err != nil) != tt.wantErr { t.Errorf("writeBatch() error = %v, wantErr = %v", err, tt.wantErr) } @@ -88,12 +89,12 @@ func TestValueLogWriteAllKindsEntries(t *testing.T) { } err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestValueLogWriteBatch(t *testing.T) { path, err := 
os.MkdirTemp("", "vlog-test-write-batch") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -122,19 +123,18 @@ func TestValueLogWriteBatch(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, numRW := range numRWList { - err := writeBatch(opts, numRW, tt.numPart) + err = writeBatch(opts, numRW, tt.numPart) if (err != nil) != tt.wantErr { t.Errorf("writeBatch() error = %v, wantErr = %v", err, tt.wantErr) } } }) } - } func TestValueLogWriteBatchReopen(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-write-batch-reopen") - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -151,11 +151,11 @@ func TestValueLogWriteBatchReopen(t *testing.T) { numRW := 50000 numPart := 10 err = writeBatch(opts, numRW, numPart) - assert.Nil(t, err) + require.NoError(t, err) t.Run("writeBatch after reopening", func(t *testing.T) { err = writeBatch(opts, numRW, numPart) - assert.Nil(t, err) + assert.NoError(t, err) }) } @@ -183,7 +183,7 @@ func writeBatch(opts valueLogOptions, numRW int, numPart int) error { func TestValueLogRead(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-read") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: GB, @@ -193,7 +193,7 @@ func TestValueLogRead(t *testing.T) { } vlog, err := openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -229,13 +229,13 @@ func TestValueLogRead(t *testing.T) { }, } - pos, err := vlog.writeBatch(logs[:]) - assert.Nil(t, err) + pos, err := vlog.writeBatch(logs) + require.NoError(t, err) for i, tt := range tests { t.Run(tt.name, func(t *testing.T) { - readLogs, err := vlog.read(pos[i]) - if (err != nil) != tt.wantErr { + readLogs, errRead := vlog.read(pos[i]) + if (errRead != nil) != tt.wantErr { t.Errorf("read(pos) error = %v, wantErr = %v", err, tt.wantErr) } assert.Equal(t, kv[string(pos[i].key)], 
string(readLogs.value)) @@ -243,12 +243,12 @@ func TestValueLogRead(t *testing.T) { } err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestValueLogReadReopen(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-read-reopen") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: GB, @@ -258,7 +258,7 @@ func TestValueLogReadReopen(t *testing.T) { } vlog, err := openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) @@ -275,27 +275,27 @@ func TestValueLogReadReopen(t *testing.T) { "key 3": "", } - pos, err := vlog.writeBatch(logs[:]) - assert.Nil(t, err) + pos, err := vlog.writeBatch(logs) + require.NoError(t, err) err = vlog.close() - assert.Nil(t, err) + require.NoError(t, err) t.Run("read after reopening vlog", func(t *testing.T) { vlog, err = openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) for i := 0; i < len(logs); i++ { - record, err := vlog.read(pos[i]) - assert.Nil(t, err) + record, errRead := vlog.read(pos[i]) + require.NoError(t, errRead) assert.Equal(t, kv[string(pos[i].key)], string(record.value)) } err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) }) } func TestValueLogSync(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-sync") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: GB, @@ -309,22 +309,22 @@ func TestValueLogSync(t *testing.T) { }() vlog, err := openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) _, err = vlog.writeBatch([]*ValueLogRecord{{key: []byte("key"), value: []byte("value")}}) - assert.Nil(t, err) + require.NoError(t, err) t.Run("test value log sync", func(t *testing.T) { - err := vlog.sync() - assert.Nil(t, err) + errSync := vlog.sync() + assert.NoError(t, errSync) }) err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) } func TestValueLogClose(t *testing.T) { path, err := os.MkdirTemp("", 
"vlog-test-close") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: GB, @@ -334,21 +334,21 @@ func TestValueLogClose(t *testing.T) { } vlog, err := openValueLog(opts) - assert.Nil(t, err) + require.NoError(t, err) defer func() { _ = os.RemoveAll(path) }() t.Run("test close value log", func(t *testing.T) { - err := vlog.close() - assert.Nil(t, err) + errClose := vlog.close() + assert.NoError(t, errClose) }) } func TestValueLogMultiSegmentFiles(t *testing.T) { path, err := os.MkdirTemp("", "vlog-test-multi-segment") - assert.Nil(t, err) + require.NoError(t, err) opts := valueLogOptions{ dirPath: path, segmentSize: 100 * MB, @@ -374,8 +374,8 @@ func TestValueLogMultiSegmentFiles(t *testing.T) { _ = os.RemoveAll(path) }() - vlog, err := openValueLog(opts) - assert.Nil(t, err) + vlog, errOpen := openValueLog(opts) + require.NoError(t, errOpen) var logs []*ValueLogRecord numLogs := (tt.NumSeg - 0.5) * 100 @@ -388,12 +388,12 @@ func TestValueLogMultiSegmentFiles(t *testing.T) { _, err = vlog.writeBatch(logs) assert.Equal(t, tt.want, err) - entries, err := os.ReadDir(path) - assert.Nil(t, err) - assert.Equal(t, tt.wantNumSeg, len(entries)) + entries, errRead := os.ReadDir(path) + require.NoError(t, errRead) + assert.Len(t, entries, tt.wantNumSeg) err = vlog.close() - assert.Nil(t, err) + assert.NoError(t, err) }) } }