arc.go · forked from hashicorp/golang-lru
package lru

import (
	"sync"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst of accesses to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead compared to a standard LRU cache:
// computationally it is roughly 2x the cost, and the extra memory overhead
// is linear with the size of the cache. ARC has been patented by IBM, but
// it is similar to the TwoQueueCache (2Q), which requires setting parameters.
type ARCCache[K comparable, V any] struct {
	size int // Size is the total capacity of the cache
	p    int // P is the dynamic preference towards T1 or T2

	t1 simplelru.LRUCache[K, V] // T1 is the LRU for recently accessed items
	b1 simplelru.LRUCache[K, V] // B1 is the LRU for evictions from t1

	t2 simplelru.LRUCache[K, V] // T2 is the LRU for frequently accessed items
	b2 simplelru.LRUCache[K, V] // B2 is the LRU for evictions from t2

	lock sync.RWMutex
}

// NewARC creates an ARC of the given size
func NewARC[K comparable, V any](size int) (*ARCCache[K, V], error) {
	// Create the sub LRUs
	b1, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}
	b2, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}
	t1, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}
	t2, err := simplelru.NewLRU[K, V](size, nil)
	if err != nil {
		return nil, err
	}

	// Initialize the ARC
	c := &ARCCache[K, V]{
		size: size,
		p:    0,
		t1:   t1,
		b1:   b1,
		t2:   t2,
		b2:   b2,
	}
	return c, nil
}
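
// The helper below is an illustrative sketch, not part of the upstream
// hashicorp/golang-lru API; the name exampleARCUsage is hypothetical and
// introduced here only to show the typical construct/Add/Get flow of the
// generic ARCCache.
func exampleARCUsage() {
	cache, err := NewARC[string, int](128)
	if err != nil {
		panic(err) // construction fails only for a non-positive size
	}

	// Insert a value; a brand-new key always lands in T1 (recent).
	cache.Add("alpha", 1)

	// A Get hit on a T1 entry promotes it to T2 (frequent).
	if v, ok := cache.Get("alpha"); ok {
		_ = v // v == 1
	}
}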

// Get looks up a key's value from the cache.
func (c *ARCCache[K, V]) Get(key K) (value V, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If the value is contained in T1 (recent), then
	// promote it to T2 (frequent)
	if val, ok := c.t1.Peek(key); ok {
		c.t1.Remove(key)
		c.t2.Add(key, val)
		return val, ok
	}

	// Check if the value is contained in T2 (frequent)
	if val, ok := c.t2.Get(key); ok {
		return val, ok
	}

	// No hit
	return
}

// Add adds a value to the cache.
func (c *ARCCache[K, V]) Add(key K, value V) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Check if the value is contained in T1 (recent), and potentially
	// promote it to frequent T2
	if c.t1.Contains(key) {
		c.t1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Check if the value is already in T2 (frequent) and update it
	if c.t2.Contains(key) {
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// recently used list
	if c.b1.Contains(key) {
		// T1 set is too small, increase P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b2Len > b1Len {
			delta = b2Len / b1Len
		}
		if c.p+delta >= c.size {
			c.p = c.size
		} else {
			c.p += delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(false)
		}

		// Remove from B1
		c.b1.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Check if this value was recently evicted as part of the
	// frequently used list
	if c.b2.Contains(key) {
		// T2 set is too small, decrease P appropriately
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b1Len > b2Len {
			delta = b1Len / b2Len
		}
		if delta >= c.p {
			c.p = 0
		} else {
			c.p -= delta
		}

		// Potentially need to make room in the cache
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(true)
		}

		// Remove from B2
		c.b2.Remove(key)

		// Add the key to the frequently used list
		c.t2.Add(key, value)
		return
	}

	// Potentially need to make room in the cache
	if c.t1.Len()+c.t2.Len() >= c.size {
		c.replace(false)
	}

	// Keep the size of the ghost buffers trim
	if c.b1.Len() > c.size-c.p {
		c.b1.RemoveOldest()
	}
	if c.b2.Len() > c.p {
		c.b2.RemoveOldest()
	}

	// Add to the recently seen list
	c.t1.Add(key, value)
}
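
// Illustrative sketch (hypothetical helper; it lives in the same package, so
// it can read the unexported p field): a hit in the B1 ghost list means a
// recently evicted key was wanted again, so Add enlarges p, which gives T1 a
// larger share of the capacity on future evictions. A B2 ghost hit shrinks p
// in the same way.
func exampleGhostHitAdaptation() {
	cache, _ := NewARC[int, int](2)

	// Fill the cache and force an eviction from T1 into the B1 ghost list.
	cache.Add(1, 1)
	cache.Add(2, 2)
	cache.Add(3, 3) // evicts key 1 from T1; key 1 is now remembered in B1

	before := cache.p

	// Re-adding the ghost key is a B1 hit: p grows and the key goes to T2.
	cache.Add(1, 1)
	_ = before // cache.p has grown (here from 0 to 1), so evictions now favor keeping T1 entries
}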

// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache[K, V]) replace(b2ContainsKey bool) {
	t1Len := c.t1.Len()
	if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
		k, _, ok := c.t1.RemoveOldest()
		if ok {
			var empty V
			c.b1.Add(k, empty)
		}
	} else {
		k, _, ok := c.t2.RemoveOldest()
		if ok {
			var empty V
			c.b2.Add(k, empty)
		}
	}
}

// Len returns the number of cached entries
func (c *ARCCache[K, V]) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Len() + c.t2.Len()
}

// Keys returns all the cached keys
func (c *ARCCache[K, V]) Keys() []K {
	c.lock.RLock()
	defer c.lock.RUnlock()
	k1 := c.t1.Keys()
	k2 := c.t2.Keys()
	return append(k1, k2...)
}

// Remove is used to purge a key from the cache
func (c *ARCCache[K, V]) Remove(key K) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.t1.Remove(key) {
		return
	}
	if c.t2.Remove(key) {
		return
	}
	if c.b1.Remove(key) {
		return
	}
	if c.b2.Remove(key) {
		return
	}
}

// Purge is used to clear the cache
func (c *ARCCache[K, V]) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.t1.Purge()
	c.t2.Purge()
	c.b1.Purge()
	c.b2.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache[K, V]) Contains(key K) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.t1.Contains(key) || c.t2.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache[K, V]) Peek(key K) (value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if val, ok := c.t1.Peek(key); ok {
		return val, ok
	}
	return c.t2.Peek(key)
}
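
// Illustrative sketch (hypothetical helper): Contains and Peek inspect the
// cache without touching recency or frequency, whereas Get promotes a T1 hit
// into T2. This matters when a read should not influence future eviction
// decisions.
func examplePeekVsGet() {
	cache, _ := NewARC[string, int](64)
	cache.Add("key", 42)

	// Peek and Contains leave the entry in T1 (still only "recently used").
	if v, ok := cache.Peek("key"); ok {
		_ = v // 42, no promotion
	}
	_ = cache.Contains("key") // true, no promotion

	// Get moves the entry from T1 to T2, marking it "frequently used".
	v, _ := cache.Get("key")
	_ = v // 42
}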