diff --git a/cache_test.go b/cache_test.go index 96bef52..8f56f3a 100644 --- a/cache_test.go +++ b/cache_test.go @@ -165,6 +165,7 @@ func TestCache_SetWithTTL(t *testing.T) { var mutex sync.Mutex m := make(map[DeletionCause]int) c, err := MustBuilder[int, int](size). + CollectStats(). InitialCapacity(size). WithTTL(time.Second). DeletionListener(func(key int, value int, cause DeletionCause) { @@ -199,6 +200,14 @@ func TestCache_SetWithTTL(t *testing.T) { mutex.Unlock() t.Fatalf("cache was supposed to expire %d, but expired %d entries", size, e) } + if c.Stats().EvictedCount() != int64(m[Expired]) { + mutex.Unlock() + t.Fatalf( + "Eviction statistics are not collected for expiration. EvictedCount: %d, expired entries: %d", + c.Stats().EvictedCount(), + m[Expired], + ) + } mutex.Unlock() m = make(map[DeletionCause]int) @@ -240,6 +249,14 @@ func TestCache_SetWithTTL(t *testing.T) { if len(m) != 1 || m[Expired] != size { t.Fatalf("cache was supposed to expire %d, but expired %d entries", size, m[Expired]) } + if c.Stats().EvictedCount() != int64(m[Expired]) { + mutex.Unlock() + t.Fatalf( + "Eviction statistics are not collected for expiration. EvictedCount: %d, expired entries: %d", + c.Stats().EvictedCount(), + m[Expired], + ) + } } func TestCache_Delete(t *testing.T) { diff --git a/internal/core/cache.go b/internal/core/cache.go index bac512d..761a3ab 100644 --- a/internal/core/cache.go +++ b/internal/core/cache.go @@ -381,6 +381,8 @@ func (c *Cache[K, V]) deleteExpiredNode(n node.Node[K, V]) { if deleted != nil { n.Die() c.notifyDeletion(n.Key(), n.Value(), Expired) + c.stats.IncEvictedCount() + c.stats.AddEvictedCost(n.Cost()) } } diff --git a/internal/hashtable/map.go b/internal/hashtable/map.go index fec5fa6..01d261c 100644 --- a/internal/hashtable/map.go +++ b/internal/hashtable/map.go @@ -83,11 +83,13 @@ type table[K comparable] struct { } func (t *table[K]) addSize(bucketIdx uint64, delta int) { + //nolint:gosec // there will never be an overflow counterIdx := uint64(len(t.size)-1) & bucketIdx atomic.AddInt64(&t.size[counterIdx].c, int64(delta)) } func (t *table[K]) addSizePlain(bucketIdx uint64, delta int) { + //nolint:gosec // there will never be an overflow counterIdx := uint64(len(t.size)-1) & bucketIdx t.size[counterIdx].c += int64(delta) } @@ -159,6 +161,7 @@ func newTable[K comparable](bucketCount int, prevHasher maphash.Hasher[K]) *tabl counterLength = maxCounterLength } counter := make([]paddedCounter, counterLength) + //nolint:gosec // there will never be an overflow mask := uint64(len(buckets) - 1) t := &table[K]{ buckets: buckets, @@ -435,6 +438,7 @@ func (m *Map[K, V]) resize(known *table[K], hint resizeHint) { if hint != clearHint { for i := 0; i < tableLen; i++ { copied := m.copyBuckets(&t.buckets[i], nt) + //nolint:gosec // there will never be an overflow nt.addSizePlain(uint64(i), copied) } }