Skip to content
This repository has been archived by the owner on Aug 2, 2021. It is now read-only.

Fix data races in TestLDBStoreCollectGarbage #1123

Closed
wants to merge 4 commits into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 13 additions & 14 deletions swarm/storage/ldbstore_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ import (
"strconv"
"strings"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
Expand Down Expand Up @@ -388,11 +387,11 @@ func testLDBStoreCollectGarbage(t *testing.T) {
t.Fatal(err.Error())
}
allChunks = append(allChunks, chunks...)
ldb.lock.RLock()
log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
ldb.lock.RUnlock()

ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
waitGc(ctx, ldb)
waitGc(ldb)
}

// attempt gets on all put chunks
Expand Down Expand Up @@ -466,6 +465,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
}

func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
t.Skip("flaky with -race flag")

params := strings.Split(t.Name(), "/")
capacity, err := strconv.Atoi(params[2])
Expand Down Expand Up @@ -496,9 +496,7 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
}
}

ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
waitGc(ctx, ldb)
waitGc(ldb)

// delete all chunks
// (only count the ones actually deleted, the rest will have been gc'd)
Expand Down Expand Up @@ -537,14 +535,14 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
remaining -= putCount
for putCount > 0 {
ldb.Put(context.TODO(), chunks[puts])
ldb.lock.RLock()
log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
ldb.lock.RUnlock()
puts++
putCount--
}

ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
waitGc(ctx, ldb)
waitGc(ldb)
}

// expect first surplus chunks to be missing, because they have the smallest access value
Expand Down Expand Up @@ -597,9 +595,7 @@ func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
}

// wait for garbage collection to kick in on the responsible actor
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
waitGc(ctx, ldb)
waitGc(ldb)

var missing int
for i, ch := range chunks[2 : capacity/2] {
Expand Down Expand Up @@ -788,7 +784,10 @@ func TestCleanIndex(t *testing.T) {
}
}

func waitGc(ctx context.Context, ldb *LDBStore) {
// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
<-ldb.gc.runC
ldb.gc.runC <- struct{}{}
}